Compare commits
28 commits
feature/ru ... main
| SHA1 |
|---|
| e8ad5b5c23 |
| 6256c61a45 |
| 97a3f86ef9 |
| ee68c5be0e |
| 8650746d66 |
| 87d78e7605 |
| de211d2b71 |
| 20c963f6c8 |
| 6e13cef237 |
| 7a77ec87d3 |
| 22fbe53710 |
| 2a29f1fc82 |
| 65d189c990 |
| 26dda29c8d |
| 4793a96587 |
| 9ee5156dfc |
| 88d87bd25d |
| 7842e4eb3e |
| dd43b89086 |
| 71e0d31d8d |
| 6fb909cd07 |
| 99fdb12712 |
| f2e79d4cd0 |
| b57734db33 |
| c6e678188f |
| d30188e7d9 |
| b461dc39a7 |
| b53c7170eb |
118 changed files with 2934 additions and 556 deletions
.vscode/extensions.json (vendored, Normal file, 5 lines)
@@ -0,0 +1,5 @@
{
    "recommendations": [
        "zxh404.vscode-proto3"
    ]
}
.vscode/settings.json (vendored, Normal file, 10 lines)
@@ -0,0 +1,10 @@
{
    "protoc": {
        "compile_on_save": true,
        "options": [
            "--proto_path=${workspaceRoot}/tripod-id/proto",
            "--proto_path=${workspaceRoot}/core/proto",
            "--java_out=${workspaceRoot}/.tmp"
        ]
    }
}
Cargo.toml (13 lines)
@@ -26,7 +26,7 @@ caretta-sync-macros = { path="macros", optional = true}
caretta-sync-core = {workspace = true, features = ["test"]}

[workspace]
members = [ ".", "core", "macros", "cli", "mobile", "examples/*" , "bevy"]
members = [ ".", "core", "macros", "cli", "mobile", "examples/*" , "bevy", "tripod-id"]
resolver = "3"

[workspace.package]
@@ -38,18 +38,27 @@ repository = "https://forgejo.fireturlte.net/lazy-supplements"

[workspace.dependencies]
bevy = { git = "https://github.com/bevyengine/bevy.git", rev="16ffdaea0daec11e4347d965f56c9c8e1122a488" }
tripod-id = {path="./tripod-id", features=["prost", "rusqlite", "serde"]}
chrono = "0.4.41"
ciborium = "0.2.2"
clap = { version = "4.5.38", features = ["derive"] }
caretta-sync-core.path = "core"
futures = { version = "0.3.31", features = ["executor"] }
libp2p = { version = "0.55.0", features = ["macros", "mdns", "noise", "ping", "tcp", "tokio", "yamux" ] }
rand = "0.8.5"
rusqlite = "0.37.0"
serde = { version = "1.0.219", features = ["derive"] }
thiserror = "2.0.12"
tokio = { version = "1.45.0", features = ["macros", "rt", "rt-multi-thread"] }
tokio-stream = "0.1.17"
tonic = "0.14.0"
url = { version = "2.5.7", features = ["serde"] }
uuid = { version = "1.17.0", features = ["v7"] }
iroh = { version = "0.92.0", features = ["discovery-local-network", "discovery-pkarr-dht"] }
prost = "0.14.1"
prost-types = "0.14.1"
tonic-prost-build = "0.14.0"
tonic-prost = "0.14.0"


[profile.dev]
opt-level = 1

(file path not shown)
@@ -15,7 +15,6 @@ ciborium.workspace = true
clap.workspace = true
dirs = "6.0.0"
caretta-sync-core = { workspace = true, features = ["cli"] }
libp2p.workspace = true
serde.workspace = true
thiserror.workspace = true
tokio.workspace = true

(file path not shown; deleted file)
@@ -1,24 +0,0 @@
use clap::Args;
use caretta_sync_core::utils::runnable::Runnable;

use crate::cli::ConfigArgs;

use crate::cli::PeerArgs;

#[derive(Debug, Args)]
pub struct DeviceAddCommandArgs {
    #[command(flatten)]
    peer: PeerArgs,
    #[arg(short, long)]
    passcode: Option<String>,
    #[command(flatten)]
    config: ConfigArgs
}

impl Runnable for DeviceAddCommandArgs {
    fn run(self, app_name: &'static str) {
        todo!()
    }
}
cli/src/cli/device/auth/approve.rs (Normal file, 0 lines)
cli/src/cli/device/auth/list.rs (Normal file, 0 lines)
cli/src/cli/device/auth/mod.rs (Normal file, 0 lines)
cli/src/cli/device/auth/request.rs (Normal file, 0 lines)

(file path not shown)
@@ -6,7 +6,6 @@ mod scan;

pub use add::DeviceAddCommandArgs;
use caretta_sync_core::utils::runnable::Runnable;
use libp2p::{Multiaddr, PeerId};
pub use list::DeviceListCommandArgs;
pub use ping::DevicePingCommandArgs;
pub use remove::DeviceRemoveCommandArgs;

(file path not shown)
@@ -1,8 +1,7 @@
use std::marker::PhantomData;

use clap::Args;
use caretta_sync_core::{config::Config, data::migration::DataMigrator, global::{CONFIG, DATABASE_CONNECTIONS}, server::ServerTrait, utils::runnable::Runnable};
use libp2p::{noise, ping, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Swarm};
use caretta_sync_core::{config::Config, global::{CONFIG, LOCAL_DATABASE_CONNECTION}, server::ServerTrait, utils::runnable::Runnable};

use super::ConfigArgs;

@@ -23,7 +22,7 @@ where
    #[tokio::main]
    async fn run(self, app_name: &'static str) {
        let config = CONFIG.get_or_init::<Config>(self.config.into_config(app_name).await).await;
        let _ = DATABASE_CONNECTIONS.get_or_init_unchecked(&config, DataMigrator).await;
        let _ = LOCAL_DATABASE_CONNECTION.get_or_init(&config.storage.get_local_database_path() );
        T::serve_all(config).await.unwrap();
    }
}

(file path not shown)
@@ -13,18 +13,19 @@ test = ["dep:tempfile", ]

[dependencies]
base64 = "0.22.1"
tripod-id.workspace = true
chrono.workspace = true
chrono-tz = "0.10.3"
ciborium.workspace = true
clap = {workspace = true, optional = true}
dirs = "6.0.0"
futures.workspace = true
libp2p.workspace = true
libp2p-core = { version = "0.43.0", features = ["serde"] }
libp2p-identity = { version = "0.2.11", features = ["ed25519", "peerid", "rand", "serde"] }
prost = "0.14.1"
prost-types = "0.14.1"
iroh.workspace = true
prost.workspace = true
prost-types.workspace = true
rusqlite = { workspace = true, features = ["bundled", "chrono", "uuid"] }
serde.workspace = true
sysinfo = "0.37.0"
tempfile = { version = "3.20.0", optional = true }
thiserror.workspace = true
tokio.workspace = true
@@ -35,9 +36,10 @@ tracing = "0.1.41"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
uuid.workspace = true
url.workspace = true
sysinfo = "0.37.0"
whoami = "1.6.1"
rusqlite = { version = "0.37.0", features = ["bundled"] }
rand.workspace = true
ed25519-dalek = { version = "2.2.0", features = ["signature"] }
tokio-stream.workspace = true

[target.'cfg(target_os="android")'.dependencies]
jni = "0.21.1"
@@ -52,4 +54,4 @@ objc2-app-kit = "0.3.1"
tempfile = "3.20.0"

[build-dependencies]
tonic-prost-build = "0.14.0"
tonic-prost-build.workspace = true

(file path not shown)
@@ -1,4 +1,14 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_prost_build::compile_protos("proto/caretta_sync.proto")?;
    tonic_prost_build::configure()
        .extern_path(".tripod_id", "::tripod_id::prost")
        .compile_protos(
            &[
                "proto/caretta_sync/authorization_request/authorization_request.proto",
                "proto/caretta_sync/authorized_node/authorized_node.proto",
                "proto/caretta_sync/remote_node/remote_node.proto",

            ],
            &["proto", "../tripod-id/proto"]
        )?;
    Ok(())
}
(file path not shown; deleted file)
@@ -1,33 +0,0 @@
syntax = "proto3";
package caretta_sync;
import "google/protobuf/timestamp.proto";

enum PeerListOrderBy {
    CREATED_AT = 0;
    UPDATED_AT = 1;
    PEER_ID = 2;
}

service CachedPeerService {
    rpc List(CachedPeerListRequest) returns (CachedPeerListResponse);
}

message CachedPeerListRequest {}

message CachedPeerMessage {
    uint32 number = 1;
    string peer_id = 2;
    google.protobuf.Timestamp created_at = 3;
    repeated CachedAddressMessage addresses = 4;
}

message CachedAddressMessage {
    uint32 number = 1;
    google.protobuf.Timestamp created_at = 2;
    google.protobuf.Timestamp updated_at = 3;
    string multiaddress = 4;
}

message CachedPeerListResponse {
    repeated CachedPeerMessage peers = 1;
}
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorization_request/identifier.proto";
|
||||
|
||||
message AcceptRequest {
|
||||
Identifier authorization_request = 1;
|
||||
string passcode = 2;
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorized_node/info.proto";
|
||||
|
||||
message AcceptResponse {
|
||||
caretta_sync.authorized_node.Info authorized_node_info = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorization_request/accept_request.proto";
|
||||
import "caretta_sync/authorization_request/accept_response.proto";
|
||||
import "caretta_sync/authorization_request/info_request.proto";
|
||||
import "caretta_sync/authorization_request/info_response.proto";
|
||||
import "caretta_sync/authorization_request/list_request.proto";
|
||||
import "caretta_sync/authorization_request/list_response.proto";
|
||||
import "caretta_sync/authorization_request/reject_request.proto";
|
||||
import "caretta_sync/authorization_request/reject_response.proto";
|
||||
import "caretta_sync/authorization_request/send_request.proto";
|
||||
import "caretta_sync/authorization_request/send_response.proto";
|
||||
|
||||
|
||||
|
||||
service AuthorizationRequest {
|
||||
rpc Send(SendRequest) returns (SendResponse);
|
||||
rpc Accept(AcceptRequest) returns (AcceptResponse);
|
||||
rpc Reject(RejectRequest) returns (RejectResponse);
|
||||
rpc Info(InfoRequest) returns (InfoResponse);
|
||||
rpc List(stream ListRequest) returns (stream ListResponse);
|
||||
}
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/common/uuid.proto";
|
||||
import "tripod_id/double.proto";
|
||||
|
||||
message Identifier {
|
||||
oneof identifier_value {
|
||||
caretta_sync.common.Uuid uuid = 1;
|
||||
tripod_id.Double id = 2;
|
||||
}
|
||||
}
|
||||
core/proto/caretta_sync/authorization_request/info.proto (Normal file, 16 lines)
|
|
@ -0,0 +1,16 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/iroh/public_key.proto";
|
||||
import "caretta_sync/authorization_request/status.proto";
|
||||
import "tripod_id/double.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
message Info {
|
||||
tripod_id.Double id = 1;
|
||||
caretta_sync.iroh.PublicKey public_key = 2;
|
||||
google.protobuf.Timestamp created_at = 3;
|
||||
google.protobuf.Timestamp closed_at = 6;
|
||||
Status status = 4;
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorization_request/identifier.proto";
|
||||
|
||||
message InfoRequest {
|
||||
Identifier request = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorization_request/info.proto";
|
||||
|
||||
|
||||
message InfoResponse{
|
||||
Info info = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
message ListRequest {}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorization_request/info.proto";
|
||||
|
||||
message ListResponse {
|
||||
Info request_info = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorization_request/identifier.proto";
|
||||
|
||||
message RejectRequest {
|
||||
Identifier authorization_request = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/authorization_request/info.proto";
|
||||
|
||||
message RejectResponse {
|
||||
Info request_info = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/remote_node/identifier.proto";
|
||||
|
||||
message SendRequest {
|
||||
caretta_sync.remote_node.Identifier remote_node = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
import "caretta_sync/remote_node/info.proto";
|
||||
|
||||
message SendResponse {
|
||||
caretta_sync.remote_node.Info remote_node_info = 1;
|
||||
string passcode = 2;
|
||||
}
|
||||
core/proto/caretta_sync/authorization_request/status.proto (Normal file, 10 lines)
|
|
@ -0,0 +1,10 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.authorization_request;
|
||||
|
||||
enum Status {
|
||||
UNSPECIFIED = 0;
|
||||
SENT = 1;
|
||||
RECEIVED = 2;
|
||||
ACCEPTED = 3;
|
||||
REJECTED = 4;
|
||||
}
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.authorized_node;
|
||||
|
||||
import "caretta_sync/authorized_node/info_request.proto";
|
||||
import "caretta_sync/authorized_node/info_response.proto";
|
||||
import "caretta_sync/authorized_node/list_request.proto";
|
||||
import "caretta_sync/authorized_node/list_response.proto";
|
||||
|
||||
|
||||
service AuthorizedNode {
|
||||
rpc Info(InfoRequest) returns (InfoResponse);
|
||||
rpc List(stream ListRequest) returns (stream ListResponse);
|
||||
}
|
||||
core/proto/caretta_sync/authorized_node/identifier.proto (Normal file, 14 lines)
|
|
@ -0,0 +1,14 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.authorized_node;
|
||||
|
||||
import "tripod_id/single.proto";
|
||||
import "caretta_sync/iroh/public_key.proto";
|
||||
|
||||
|
||||
message Identifier {
|
||||
oneof identifier_value {
|
||||
tripod_id.Single id = 1;
|
||||
caretta_sync.iroh.PublicKey public_key = 2;
|
||||
}
|
||||
}
|
||||
core/proto/caretta_sync/authorized_node/info.proto (Normal file, 12 lines)
|
|
@ -0,0 +1,12 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.authorized_node;
|
||||
|
||||
import "tripod_id/single.proto";
|
||||
import "caretta_sync/iroh/public_key.proto";
|
||||
|
||||
message Info {
|
||||
tripod_id.Single id = 1;
|
||||
caretta_sync.iroh.PublicKey public_key = 2;
|
||||
string note = 3;
|
||||
}
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.authorized_node;
|
||||
|
||||
import "caretta_sync/authorized_node/identifier.proto";
|
||||
|
||||
message InfoRequest {
|
||||
Identifier node = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.authorized_node;
|
||||
import "caretta_sync/authorized_node/info.proto";
|
||||
|
||||
message InfoResponse {
|
||||
Info node_info = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.authorized_node;
|
||||
|
||||
message ListRequest{}
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.authorized_node;
|
||||
|
||||
import "caretta_sync/authorized_node/info.proto";
|
||||
|
||||
message ListResponse {
|
||||
Info node_info = 1;
|
||||
}
|
||||
core/proto/caretta_sync/common/url.proto (Normal file, 8 lines)
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.common;
|
||||
|
||||
// protobuf message of url::Url.
|
||||
message Url {
|
||||
string url = 1;
|
||||
}
|
||||
core/proto/caretta_sync/common/uuid.proto (Normal file, 9 lines)
|
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.common;
|
||||
|
||||
// protobuf message of uuid::Uuid
|
||||
message Uuid {
|
||||
uint64 high_bits = 1;
|
||||
uint64 low_bits = 2;
|
||||
}
|
||||
core/proto/caretta_sync/iroh/connection_type.proto (Normal file, 26 lines)
|
|
@ -0,0 +1,26 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.iroh;
|
||||
|
||||
import "caretta_sync/common/url.proto";
|
||||
import "caretta_sync/net/socket_addr.proto";
|
||||
|
||||
// A protobuf message of iroh::ConnectionType
|
||||
message ConnectionType {
|
||||
message Direct {
|
||||
caretta_sync.net.SocketAddr direct_value = 1;
|
||||
}
|
||||
message Relay {
|
||||
caretta_sync.common.Url relay_value = 1;
|
||||
}
|
||||
message Mixed {
|
||||
caretta_sync.net.SocketAddr socket_addr = 1;
|
||||
caretta_sync.common.Url relay_url = 2;
|
||||
}
|
||||
message None{}
|
||||
oneof connection_type_value {
|
||||
Direct direct = 1;
|
||||
Relay relay = 2;
|
||||
Mixed mixed = 3;
|
||||
None none = 4;
|
||||
}
|
||||
}
|
||||
core/proto/caretta_sync/iroh/control_msg.proto (Normal file, 18 lines)
|
|
@ -0,0 +1,18 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.iroh;
|
||||
|
||||
// The message of iroh::endpoint::ControlMsg.
// To ensure compatibility with iroh::endpoint::ControlMsg,
// it's implemented as a message rather than an enum to accommodate the possibility
// that values may be added to the enum in Rust in the future.
|
||||
message ControlMsg {
|
||||
message Ping {}
|
||||
message Pong {}
|
||||
message CallMeMayBe {}
|
||||
|
||||
oneof control_msg_vaue {
|
||||
Ping ping = 1;
|
||||
Pong pong = 2;
|
||||
CallMeMayBe call_me_maybe = 3;
|
||||
}
|
||||
}
|
||||
core/proto/caretta_sync/iroh/direct_addr_info.proto (Normal file, 30 lines)
|
|
@ -0,0 +1,30 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.iroh;
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
import "caretta_sync/iroh/control_msg.proto";
|
||||
import "caretta_sync/iroh/source.proto";
|
||||
import "caretta_sync/net/socket_addr.proto";
|
||||
|
||||
// A protobuf message of iroh::endpoint::DirectAddrInfo
|
||||
message DirectAddrInfo {
|
||||
// A protobuf message of (Duration, ControlMsg)
|
||||
message DurationControlMsg {
|
||||
|
||||
google.protobuf.Duration duration = 1;
|
||||
ControlMsg control_msg = 2;
|
||||
}
|
||||
|
||||
// A protobuf message of (iroh::Source, Duration)
|
||||
message SourceDuration {
|
||||
Source source = 1;
|
||||
google.protobuf.Duration duration = 2;
|
||||
}
|
||||
|
||||
caretta_sync.net.SocketAddr addr = 1;
|
||||
google.protobuf.Duration latency = 2;
|
||||
DurationControlMsg last_control = 3;
|
||||
google.protobuf.Duration last_payload = 4;
|
||||
google.protobuf.Duration last_alive = 5;
|
||||
repeated SourceDuration sources = 6;
|
||||
}
|
||||
core/proto/caretta_sync/iroh/public_key.proto (Normal file, 8 lines)
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.iroh;
|
||||
|
||||
// protobuf message of iroh::PublicKey.
|
||||
message PublicKey {
|
||||
bytes key = 1;
|
||||
}
|
||||
core/proto/caretta_sync/iroh/relay_url_info.proto (Normal file, 12 lines)
|
|
@ -0,0 +1,12 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.iroh;
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
import "caretta_sync/common/url.proto";
|
||||
|
||||
// A protobuf message of iroh::RelayUrlInfo
|
||||
message RelayUrlInfo {
|
||||
caretta_sync.common.Url relay_url = 1;
|
||||
google.protobuf.Duration last_alive = 2;
|
||||
google.protobuf.Duration latency = 3;
|
||||
}
|
||||
core/proto/caretta_sync/iroh/remote_info.proto (Normal file, 18 lines)
|
|
@ -0,0 +1,18 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.iroh;
|
||||
|
||||
import "caretta_sync/iroh/public_key.proto";
|
||||
import "caretta_sync/iroh/relay_url_info.proto";
|
||||
import "caretta_sync/iroh/direct_addr_info.proto";
|
||||
import "caretta_sync/iroh/connection_type.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
// A message of iroh::RemoteInfo.
|
||||
message RemoteInfo {
|
||||
PublicKey node_id = 3;
|
||||
RelayUrlInfo relay_url = 4;
|
||||
repeated DirectAddrInfo addrs = 5;
|
||||
ConnectionType conn_type = 6;
|
||||
google.protobuf.Duration latency = 7;
|
||||
google.protobuf.Duration last_used = 8;
|
||||
}
|
||||
core/proto/caretta_sync/iroh/source.proto (Normal file, 23 lines)
|
|
@ -0,0 +1,23 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.iroh;
|
||||
|
||||
message Source {
|
||||
message Saved{}
|
||||
message Udp{}
|
||||
message Relay{}
|
||||
message App{}
|
||||
message Discovery{
|
||||
string value = 1;
|
||||
}
|
||||
message NamedApp {
|
||||
string value = 1;
|
||||
}
|
||||
oneof source_value {
|
||||
Saved saved = 1;
|
||||
Udp udp = 2;
|
||||
Relay relay = 3;
|
||||
App app = 4;
|
||||
Discovery discovery = 5;
|
||||
NamedApp named_app = 6;
|
||||
};
|
||||
}
|
||||
core/proto/caretta_sync/net/ipv4_addr.proto (Normal file, 7 lines)
|
|
@ -0,0 +1,7 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.net;
|
||||
|
||||
message Ipv4Addr {
|
||||
uint32 bits = 1;
|
||||
}
|
||||
|
||||
core/proto/caretta_sync/net/ipv6_addr.proto (Normal file, 7 lines)
|
|
@ -0,0 +1,7 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.net;
|
||||
|
||||
message Ipv6Addr {
|
||||
uint64 high_bits = 1;
|
||||
uint64 low_bits = 2;
|
||||
}
|
||||
core/proto/caretta_sync/net/socket_addr.proto (Normal file, 19 lines)
|
|
@ -0,0 +1,19 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package caretta_sync.net;
|
||||
|
||||
import "caretta_sync/net/socket_addr_v4.proto";
|
||||
import "caretta_sync/net/socket_addr_v6.proto";
|
||||
|
||||
// Protobuf message of std::net::SocketAddr.
|
||||
message SocketAddr {
|
||||
oneof socket_addr_value {
|
||||
SocketAddrV4 v4 = 1;
|
||||
SocketAddrV6 v6 = 2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
core/proto/caretta_sync/net/socket_addr_v4.proto (Normal file, 9 lines)
|
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.net;
|
||||
|
||||
import "caretta_sync/net/ipv4_addr.proto";
|
||||
|
||||
message SocketAddrV4 {
|
||||
Ipv4Addr ip = 1;
|
||||
uint32 port = 2;
|
||||
}
|
||||
core/proto/caretta_sync/net/socket_addr_v6.proto (Normal file, 9 lines)
|
|
@ -0,0 +1,9 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.net;
|
||||
|
||||
import "caretta_sync/net/ipv6_addr.proto";
|
||||
|
||||
message SocketAddrV6 {
|
||||
Ipv6Addr ip = 1;
|
||||
uint32 port = 2;
|
||||
}
|
||||
core/proto/caretta_sync/remote_node/identifier.proto (Normal file, 13 lines)
|
|
@ -0,0 +1,13 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.remote_node;
|
||||
|
||||
import "caretta_sync/iroh/public_key.proto";
|
||||
import "tripod_id/double.proto";
|
||||
|
||||
|
||||
message Identifier {
|
||||
oneof identifier_value {
|
||||
tripod_id.Double id = 1;
|
||||
caretta_sync.iroh.PublicKey public_key = 2;
|
||||
}
|
||||
}
|
||||
core/proto/caretta_sync/remote_node/info.proto (Normal file, 19 lines)
|
|
@ -0,0 +1,19 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.remote_node;
|
||||
|
||||
import "caretta_sync/iroh/remote_info.proto";
|
||||
import "tripod_id/double.proto";
|
||||
|
||||
message Info {
|
||||
tripod_id.Double public_id = 1;
|
||||
caretta_sync.iroh.RemoteInfo remote_info = 2;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
core/proto/caretta_sync/remote_node/info_request.proto (Normal file, 16 lines)
|
|
@ -0,0 +1,16 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.remote_node;
|
||||
|
||||
import "caretta_sync/remote_node/identifier.proto";
|
||||
|
||||
message InfoRequest {
|
||||
Identifier remote_node = 1;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
core/proto/caretta_sync/remote_node/info_response.proto (Normal file, 8 lines)
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.remote_node;
|
||||
|
||||
import "caretta_sync/remote_node/info.proto";
|
||||
|
||||
message InfoResponse {
|
||||
Info remote_node_info = 1;
|
||||
}
|
||||
core/proto/caretta_sync/remote_node/list_request.proto (Normal file, 4 lines)
|
|
@ -0,0 +1,4 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.remote_node;
|
||||
|
||||
message ListRequest{}
|
||||
core/proto/caretta_sync/remote_node/list_response.proto (Normal file, 8 lines)
|
|
@ -0,0 +1,8 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.remote_node;
|
||||
|
||||
import "caretta_sync/remote_node/info.proto";
|
||||
|
||||
message ListResponse {
|
||||
Info remote_node_info = 1;
|
||||
}
|
||||
core/proto/caretta_sync/remote_node/remote_node.proto (Normal file, 12 lines)
|
|
@ -0,0 +1,12 @@
|
|||
syntax = "proto3";
|
||||
package caretta_sync.remote_node;
|
||||
|
||||
import "caretta_sync/remote_node/info_request.proto";
|
||||
import "caretta_sync/remote_node/info_response.proto";
|
||||
import "caretta_sync/remote_node/list_request.proto";
|
||||
import "caretta_sync/remote_node/list_response.proto";
|
||||
|
||||
service RemoteNode {
|
||||
rpc Info(InfoRequest) returns (InfoResponse);
|
||||
rpc List(stream ListRequest) returns (stream ListResponse);
|
||||
}
|
||||
core/src/config/iroh.rs (Normal file, 134 lines)
|
|
@ -0,0 +1,134 @@
|
|||
use std::{net::{IpAddr, Ipv4Addr}, ops, path::{Path, PathBuf}};
|
||||
|
||||
use base64::{prelude::BASE64_STANDARD, Engine};
|
||||
#[cfg(feature="cli")]
|
||||
use clap::Args;
|
||||
use futures::StreamExt;
|
||||
use iroh::{Endpoint, SecretKey};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::{fs::File, io::{AsyncReadExt, AsyncWriteExt}};
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
|
||||
use crate::{
|
||||
config::PartialConfig,
|
||||
error::Error, utils::{emptiable::Emptiable, mergeable::Mergeable}
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct IrohConfig {
|
||||
pub enable: bool,
|
||||
pub secret_key: SecretKey,
|
||||
pub use_n0_discovery_service: bool,
|
||||
}
|
||||
|
||||
impl IrohConfig {
|
||||
async fn into_endpoint(config: Self) -> Result<Option<Endpoint>, crate::error::Error> {
|
||||
if config.enable {
|
||||
let mut endpoint = Endpoint::builder()
|
||||
.secret_key(config.secret_key)
|
||||
.discovery_dht()
|
||||
.discovery_local_network();
|
||||
if config.use_n0_discovery_service {
|
||||
endpoint = endpoint.discovery_n0();
|
||||
}
|
||||
Ok(Some(endpoint.bind().await?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PartialIrohConfig> for IrohConfig {
|
||||
type Error = crate::error::Error;
|
||||
fn try_from(raw: PartialIrohConfig) -> Result<IrohConfig, Self::Error> {
|
||||
Ok(IrohConfig {
|
||||
enable: raw.enable.ok_or(Error::MissingConfig("iroh.enable"))?,
|
||||
secret_key: raw.secret_key.ok_or(Error::MissingConfig("iroh.secret_key"))?,
|
||||
use_n0_discovery_service: raw.use_n0_discovery_service.ok_or(Error::MissingConfig("iroh.use_n0_discovery_service"))?
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#[cfg_attr(feature="cli",derive(Args))]
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
pub struct PartialIrohConfig {
|
||||
#[cfg_attr(feature="cli",arg(long="p2p_enable"))]
|
||||
pub enable: Option<bool>,
|
||||
#[cfg_attr(feature="cli",arg(long))]
|
||||
pub secret_key: Option<SecretKey>,
|
||||
#[cfg_attr(feature="cli",arg(long))]
|
||||
pub use_n0_discovery_service: Option<bool>,
|
||||
}
|
||||
|
||||
impl PartialIrohConfig {
|
||||
pub fn with_new_secret_key(mut self) -> Self {
|
||||
let mut rng = rand::rngs::OsRng;
|
||||
self.secret_key = Some(SecretKey::generate(&mut rng));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl From<IrohConfig> for PartialIrohConfig {
|
||||
fn from(config: IrohConfig) -> Self {
|
||||
Self {
|
||||
enable: Some(config.enable),
|
||||
secret_key: Some(config.secret_key),
|
||||
use_n0_discovery_service: Some(config.use_n0_discovery_service)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for PartialIrohConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enable: Some(true),
|
||||
secret_key: None,
|
||||
use_n0_discovery_service: Some(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Emptiable for PartialIrohConfig {
|
||||
fn empty() -> Self {
|
||||
Self{
|
||||
enable: None,
|
||||
secret_key: None,
|
||||
use_n0_discovery_service: None
|
||||
}
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool {
|
||||
self.enable.is_none() && self.secret_key.is_none() && self.use_n0_discovery_service.is_none()
|
||||
}
|
||||
}
|
||||
|
||||
impl Mergeable for PartialIrohConfig {
|
||||
fn merge(&mut self, mut other: Self) {
|
||||
if let Some(x) = other.enable.take() {
|
||||
let _ = self.enable.insert(x);
|
||||
};
|
||||
if let Some(x) = other.secret_key.take() {
|
||||
let _ = self.secret_key.insert(x);
|
||||
};
|
||||
if let Some(x) = other.use_n0_discovery_service.take() {
|
||||
let _ = self.use_n0_discovery_service.insert(x);
|
||||
};
|
||||
}
|
||||
}
|
||||
impl Mergeable for Option<PartialIrohConfig> {
|
||||
fn merge(&mut self, mut other: Self) {
|
||||
match other.take() {
|
||||
Some(x) => {
|
||||
if let Some(y) = self.as_mut() {
|
||||
y.merge(x);
|
||||
} else {
|
||||
let _ = self.insert(x);
|
||||
}
|
||||
},
|
||||
None => {}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
@ -1,16 +1,16 @@
|
|||
pub mod error;
|
||||
mod storage;
|
||||
mod p2p;
|
||||
mod iroh;
|
||||
mod rpc;
|
||||
|
||||
use std::{path::Path, default::Default};
|
||||
use std::{default::Default, fs::File, io::{Read, Write}, path::Path};
|
||||
use crate::{utils::{emptiable::Emptiable, mergeable::Mergeable}};
|
||||
pub use error::ConfigError;
|
||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
|
||||
use tokio::{io::{AsyncReadExt, AsyncWriteExt}};
|
||||
pub use storage::{StorageConfig, PartialStorageConfig};
|
||||
pub use p2p::{P2pConfig, PartialP2pConfig};
|
||||
pub use iroh::{IrohConfig, PartialIrohConfig};
|
||||
pub use rpc::*;
|
||||
|
||||
#[cfg(feature="cli")]
|
||||
|
|
@ -18,7 +18,7 @@ use clap::Args;
|
|||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Config {
|
||||
pub p2p: P2pConfig,
|
||||
pub iroh: IrohConfig,
|
||||
pub storage: StorageConfig,
|
||||
pub rpc: RpcConfig,
|
||||
}
|
||||
|
|
@ -29,9 +29,9 @@ impl AsRef<StorageConfig> for Config {
|
|||
}
|
||||
}
|
||||
|
||||
impl AsRef<P2pConfig> for Config {
|
||||
fn as_ref(&self) -> &P2pConfig {
|
||||
&self.p2p
|
||||
impl AsRef<IrohConfig> for Config {
|
||||
fn as_ref(&self) -> &IrohConfig {
|
||||
&self.iroh
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -46,17 +46,17 @@ impl TryFrom<PartialConfig> for Config {
|
|||
fn try_from(value: PartialConfig) -> Result<Self, Self::Error> {
|
||||
Ok(Self{
|
||||
rpc: value.rpc.ok_or(crate::error::Error::MissingConfig("rpc"))?.try_into()?,
|
||||
p2p: value.p2p.ok_or(crate::error::Error::MissingConfig("p2p"))?.try_into()?,
|
||||
iroh: value.iroh.ok_or(crate::error::Error::MissingConfig("p2p"))?.try_into()?,
|
||||
storage: value.storage.ok_or(crate::error::Error::MissingConfig("storage"))?.try_into()?
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg_attr(feature="cli", derive(Args))]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
pub struct PartialConfig {
|
||||
#[cfg_attr(feature="cli", command(flatten))]
|
||||
pub p2p: Option<PartialP2pConfig>,
|
||||
pub iroh: Option<PartialIrohConfig>,
|
||||
#[cfg_attr(feature="cli", command(flatten))]
|
||||
pub storage: Option<PartialStorageConfig>,
|
||||
#[cfg_attr(feature="cli", command(flatten))]
|
||||
|
|
@ -66,7 +66,7 @@ pub struct PartialConfig {
|
|||
impl PartialConfig {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
p2p : Some(PartialP2pConfig::empty().with_new_private_key()),
|
||||
iroh : Some(PartialIrohConfig::empty().with_new_secret_key()),
|
||||
storage: Some(PartialStorageConfig::empty()),
|
||||
rpc: Some(PartialRpcConfig::empty()),
|
||||
}
|
||||
|
|
@ -77,16 +77,16 @@ impl PartialConfig {
|
|||
pub fn into_toml(&self) -> Result<String, toml::ser::Error> {
|
||||
toml::to_string(self)
|
||||
}
|
||||
pub async fn read_or_create<T>(path: T) -> Result<Self, ConfigError>
|
||||
pub fn read_or_create<T>(path: T) -> Result<Self, ConfigError>
|
||||
where
|
||||
T: AsRef<Path>
|
||||
{
|
||||
if !path.as_ref().exists() {
|
||||
Self::new().write_to(&path).await?;
|
||||
Self::new().write_to(&path)?;
|
||||
}
|
||||
Self::read_from(&path).await
|
||||
Self::read_from(&path)
|
||||
}
|
||||
pub async fn read_from<T>(path:T) -> Result<Self, ConfigError>
|
||||
pub fn read_from<T>(path:T) -> Result<Self, ConfigError>
|
||||
where
|
||||
T: AsRef<Path>
|
||||
{
|
||||
|
|
@ -94,15 +94,15 @@ impl PartialConfig {
|
|||
if let Some(x) = path.as_ref().parent() {
|
||||
std::fs::create_dir_all(x)?;
|
||||
};
|
||||
let _ = File::create(&path).await?;
|
||||
let _ = File::create(&path)?;
|
||||
}
|
||||
let mut file = File::open(path.as_ref()).await?;
|
||||
let mut file = File::open(path.as_ref())?;
|
||||
let mut content = String::new();
|
||||
file.read_to_string(&mut content).await?;
|
||||
file.read_to_string(&mut content)?;
|
||||
let config: Self = toml::from_str(&content)?;
|
||||
Ok(config)
|
||||
}
|
||||
pub async fn write_to<T>(&self, path:T) -> Result<(), ConfigError>
|
||||
pub fn write_to<T>(&self, path:T) -> Result<(), ConfigError>
|
||||
where
|
||||
T: AsRef<Path>
|
||||
{
|
||||
|
|
@ -110,15 +110,15 @@ impl PartialConfig {
|
|||
if let Some(x) = path.as_ref().parent() {
|
||||
std::fs::create_dir_all(x)?;
|
||||
};
|
||||
let _ = File::create(&path).await?;
|
||||
let _ = File::create(&path)?;
|
||||
}
|
||||
let mut file = File::create(&path).await?;
|
||||
file.write_all(toml::to_string(self)?.as_bytes()).await?;
|
||||
let mut file = File::create(&path)?;
|
||||
file.write_all(toml::to_string(self)?.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
pub fn default(app_name: &'static str) -> Self {
|
||||
Self {
|
||||
p2p: Some(PartialP2pConfig::default()),
|
||||
iroh: Some(PartialIrohConfig::default()),
|
||||
rpc: Some(PartialRpcConfig::default(app_name)),
|
||||
storage: Some(PartialStorageConfig::default(app_name)),
|
||||
}
|
||||
|
|
@ -128,7 +128,7 @@ impl PartialConfig {
|
|||
impl From<Config> for PartialConfig {
|
||||
fn from(value: Config) -> Self {
|
||||
Self {
|
||||
p2p: Some(value.p2p.into()),
|
||||
iroh: Some(value.iroh.into()),
|
||||
storage: Some(value.storage.into()),
|
||||
rpc: Some(value.rpc.into())
|
||||
}
|
||||
|
|
@ -138,20 +138,20 @@ impl From<Config> for PartialConfig {
|
|||
impl Emptiable for PartialConfig {
|
||||
fn empty() -> Self {
|
||||
Self {
|
||||
p2p: None,
|
||||
iroh: None,
|
||||
storage: None,
|
||||
rpc: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool {
|
||||
self.p2p.is_empty() && self.rpc.is_empty() && self.storage.is_empty()
|
||||
self.iroh.is_empty() && self.rpc.is_empty() && self.storage.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl Mergeable for PartialConfig {
|
||||
fn merge(&mut self, other: Self) {
|
||||
self.p2p.merge(other.p2p);
|
||||
self.iroh.merge(other.iroh);
|
||||
self.rpc.merge(other.rpc);
|
||||
self.storage.merge(other.storage);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,181 +0,0 @@
|
|||
use std::{net::{IpAddr, Ipv4Addr}, ops, path::{Path, PathBuf}};
|
||||
|
||||
use base64::{prelude::BASE64_STANDARD, Engine};
|
||||
#[cfg(feature="cli")]
|
||||
use clap::Args;
|
||||
use futures::StreamExt;
|
||||
use libp2p::{identity::{self, DecodingError, Keypair}, noise, ping, swarm::SwarmEvent, tcp, yamux, Swarm};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::{fs::File, io::{AsyncReadExt, AsyncWriteExt}};
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
|
||||
use crate::{
|
||||
config::PartialConfig,
|
||||
error::Error, p2p, utils::{emptiable::Emptiable, mergeable::Mergeable}
|
||||
};
|
||||
|
||||
static DEFAULT_P2P_LISTEN_IPS: &[IpAddr] = &[IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))];
|
||||
static DEFAULT_P2P_PORT: u16 = 0;
|
||||
|
||||
fn keypair_to_base64(keypair: &Keypair) -> String {
|
||||
let vec = match keypair.to_protobuf_encoding() {
|
||||
Ok(x) => x,
|
||||
Err(_) => unreachable!(),
|
||||
};
|
||||
BASE64_STANDARD.encode(vec)
|
||||
}
|
||||
|
||||
fn base64_to_keypair(base64: &str) -> Result<Keypair, Error> {
|
||||
let vec = BASE64_STANDARD.decode(base64)?;
|
||||
Ok(Keypair::from_protobuf_encoding(&vec)?)
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct P2pConfig {
|
||||
pub private_key: Keypair,
|
||||
pub listen_ips: Vec<IpAddr>,
|
||||
pub port: u16,
|
||||
}
|
||||
|
||||
impl P2pConfig {
|
||||
async fn try_into_swarm (self) -> Result<Swarm<p2p::Behaviour>, Error> {
|
||||
let mut swarm = libp2p::SwarmBuilder::with_existing_identity(self.private_key)
|
||||
.with_tokio()
|
||||
.with_tcp(
|
||||
tcp::Config::default(),
|
||||
noise::Config::new,
|
||||
yamux::Config::default,
|
||||
)?
|
||||
.with_behaviour(|keypair| p2p::Behaviour::try_from(keypair).unwrap())?
|
||||
.build();
|
||||
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
|
||||
Ok(swarm)
|
||||
}
|
||||
pub async fn launch_swarm(self) -> Result<(), Error>{
|
||||
let mut swarm = self.try_into_swarm().await?;
|
||||
loop{
|
||||
let swarm_event = swarm.select_next_some().await;
|
||||
tokio::spawn(async move{
|
||||
match swarm_event {
|
||||
SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
|
||||
SwarmEvent::Behaviour(event) => {
|
||||
println!("{event:?}");
|
||||
event.run().await;
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PartialP2pConfig> for P2pConfig {
|
||||
type Error = crate::error::Error;
|
||||
fn try_from(raw: PartialP2pConfig) -> Result<P2pConfig, Self::Error> {
|
||||
Ok(P2pConfig {
|
||||
private_key: base64_to_keypair(&raw.private_key.ok_or(Error::MissingConfig("secret"))?)?,
|
||||
listen_ips: raw.listen_ips.ok_or(Error::MissingConfig("listen_ips"))?,
|
||||
port: raw.port.ok_or(Error::MissingConfig("port"))?
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg_attr(feature="cli",derive(Args))]
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
|
||||
pub struct PartialP2pConfig {
|
||||
#[cfg_attr(feature="cli",arg(long))]
|
||||
pub private_key: Option<String>,
|
||||
#[cfg_attr(feature="cli",arg(long))]
|
||||
pub listen_ips: Option<Vec<IpAddr>>,
|
||||
#[cfg_attr(feature="cli",arg(long))]
|
||||
pub port: Option<u16>,
|
||||
}
|
||||
impl PartialP2pConfig {
|
||||
pub fn with_new_private_key(mut self) -> Self {
|
||||
self.private_key = Some(keypair_to_base64(&Keypair::generate_ed25519()));
|
||||
self
|
||||
}
|
||||
pub fn init_private_key(&mut self) {
|
||||
let _ = self.private_key.insert(keypair_to_base64(&Keypair::generate_ed25519()));
|
||||
}
|
||||
}
|
||||
|
||||
impl From<P2pConfig> for PartialP2pConfig {
|
||||
fn from(config: P2pConfig) -> Self {
|
||||
Self {
|
||||
private_key: Some(keypair_to_base64(&config.private_key)),
|
||||
listen_ips: Some(config.listen_ips),
|
||||
port: Some(config.port)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for PartialP2pConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
private_key: None,
|
||||
listen_ips: Some(Vec::from(DEFAULT_P2P_LISTEN_IPS)),
|
||||
port: Some(DEFAULT_P2P_PORT),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Emptiable for PartialP2pConfig {
|
||||
fn empty() -> Self {
|
||||
Self{
|
||||
private_key: None,
|
||||
listen_ips: None,
|
||||
port: None
|
||||
}
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool {
|
||||
self.private_key.is_none() && self.listen_ips.is_none() && self.port.is_none()
|
||||
}
|
||||
}
|
||||
|
||||
impl Mergeable for PartialP2pConfig {
|
||||
fn merge(&mut self, mut other: Self) {
|
||||
if let Some(x) = other.private_key.take() {
|
||||
let _ = self.private_key.insert(x);
|
||||
};
|
||||
if let Some(x) = other.listen_ips.take() {
|
||||
let _ = self.listen_ips.insert(x);
|
||||
};
|
||||
if let Some(x) = other.port.take() {
|
||||
let _ = self.port.insert(x);
|
||||
};
|
||||
}
|
||||
}
|
||||
impl Mergeable for Option<PartialP2pConfig> {
|
||||
fn merge(&mut self, mut other: Self) {
|
||||
match other.take() {
|
||||
Some(x) => {
|
||||
if let Some(y) = self.as_mut() {
|
||||
y.merge(x);
|
||||
} else {
|
||||
let _ = self.insert(x);
|
||||
}
|
||||
},
|
||||
None => {}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use libp2p::identity;
|
||||
use super::*;
|
||||
|
||||
|
||||
#[tokio::test]
|
||||
async fn parse_keypair() {
|
||||
let keypair = identity::Keypair::generate_ed25519();
|
||||
let keypair2 = base64_to_keypair(&keypair_to_base64(&keypair)).unwrap();
|
||||
|
||||
assert_eq!(keypair.public(), keypair2.public());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -3,7 +3,6 @@ use std::{net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, path::PathBuf, str::
|
|||
use clap::Args;
|
||||
use url::Url;
|
||||
use crate::{config::PartialConfig, utils::{emptiable::Emptiable, mergeable::Mergeable}};
|
||||
use libp2p::mdns::Config;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::config::error::ConfigError;
|
||||
|
|
|
|||
|
|
@ -3,10 +3,10 @@ use std::path::PathBuf;
|
|||
#[cfg(feature="cli")]
|
||||
use clap::Args;
|
||||
|
||||
use rusqlite::Connection;
|
||||
#[cfg(any(test, feature="test"))]
|
||||
use tempfile::tempdir;
|
||||
use crate::{config::{ConfigError, PartialConfig}, utils::{emptiable::Emptiable, get_binary_name, mergeable::Mergeable}};
|
||||
use libp2p::mdns::Config;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
|
|
@ -16,6 +16,15 @@ pub struct StorageConfig {
|
|||
}
|
||||
|
||||
impl StorageConfig {
|
||||
pub fn get_global_data_directory(&self) -> PathBuf {
|
||||
self.data_directory.join("global")
|
||||
}
|
||||
pub fn get_global_root_document_path(&self) -> PathBuf {
|
||||
self.data_directory.join("global.bin")
|
||||
}
|
||||
pub fn get_local_data_directory(&self) -> PathBuf {
|
||||
self.data_directory.join("local")
|
||||
}
|
||||
pub fn get_local_database_path(&self) -> PathBuf {
|
||||
self.data_directory.join("local.sqlite")
|
||||
}
|
||||
|
|
@ -31,6 +40,7 @@ impl TryFrom<PartialStorageConfig> for StorageConfig {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg_attr(feature="cli", derive(Args))]
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
|
||||
pub struct PartialStorageConfig {
|
||||
|
|
|
|||
core/src/data/local/authorization_request/common.rs (Normal file, 0 lines)
core/src/data/local/authorization_request/mod.rs (Normal file, 80 lines)
|
|
@ -0,0 +1,80 @@
|
|||
//! Structs about authorization.
|
||||
|
||||
mod sent;
|
||||
mod received;
|
||||
|
||||
use std::os::unix::raw::time_t;
|
||||
|
||||
use tripod_id::Double;
|
||||
use chrono::{DateTime, Local, NaiveDateTime};
|
||||
use iroh::{NodeId, PublicKey};
|
||||
pub use sent::*;
|
||||
pub use received::*;
|
||||
use rusqlite::{params, types::FromSqlError, Connection};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::data::local::LocalRecord;
|
||||
|
||||
|
||||
/// Request of node authentication.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AuthorizationRequestRecord {
|
||||
id: u32,
|
||||
uuid: Uuid,
|
||||
public_id: Double,
|
||||
peer_id: u32,
|
||||
created_at: DateTime<Local>,
|
||||
closed_at: Option<DateTime<Local>>,
|
||||
}
|
||||
|
||||
impl LocalRecord for AuthorizationRequestRecord {
|
||||
|
||||
const TABLE_NAME: &str = "authorization_request";
|
||||
const SELECT_COLUMNS: &[&str] = &[
|
||||
"id",
|
||||
"uuid",
|
||||
"public_id",
|
||||
"peer_id",
|
||||
"created_at",
|
||||
"closed_at"
|
||||
];
|
||||
const INSERT_COLUMNS: &[&str] = &[
|
||||
"uuid",
|
||||
"public_id",
|
||||
"peer_id",
|
||||
"created_at"
|
||||
];
|
||||
|
||||
type InsertParams<'a> = (&'a Double, &'a [u8;32], &'a NaiveDateTime);
|
||||
type SelectValues = (u32, Uuid, Double, PublicKey, NaiveDateTime, Option<NaiveDateTime>);
|
||||
fn from_row(row: &rusqlite::Row<'_>) -> Result<Self, rusqlite::Error> {
|
||||
let created_at: NaiveDateTime = row.get(4)?;
|
||||
let closed_at: Option<NaiveDateTime> = row.get(5)?;
|
||||
Ok(Self {
|
||||
id: row.get(0)?,
|
||||
uuid: row.get(1)?,
|
||||
public_id: row.get(2)?,
|
||||
peer_id: row.get(3)?,
|
||||
created_at: created_at.and_utc().into(),
|
||||
closed_at: closed_at.map(|x| x.and_utc().into())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl From<(u32, Uuid, Double, u32, NaiveDateTime, Option<NaiveDateTime>)> for AuthorizationRequestRecord {
|
||||
fn from(value: (u32, Uuid, Double, u32, NaiveDateTime, Option<NaiveDateTime>)) -> Self {
|
||||
Self {
|
||||
id: value.0,
|
||||
uuid: value.1,
|
||||
public_id: value.2,
|
||||
peer_id: value.3,
|
||||
created_at: value.4.and_utc().into(),
|
||||
closed_at: value.5.map(|x| x.and_utc().into())
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> From<&'a rusqlite::Row<'_>> for AuthorizationRequestRecord {
|
||||
fn from(value: &'a rusqlite::Row<'_>) -> Self {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
core/src/data/local/authorization_request/received.rs (Normal file, 41 lines)
|
|
@ -0,0 +1,41 @@
|
|||
use caretta_id::{DoubleId, SingleId};
|
||||
use chrono::{DateTime, Local, NaiveDateTime};
|
||||
use iroh::{NodeId, PublicKey};
|
||||
|
||||
use crate::{data::local::LocalRecord, global::LOCAL_DATABASE_CONNECTION};
|
||||
|
||||
/// Response of node authentication.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ReceivedAuthorizationRequestRecord {
|
||||
id: u32,
|
||||
authorization_request_id: u32,
|
||||
peer_note: String,
|
||||
}
|
||||
|
||||
impl LocalRecord for ReceivedAuthorizationRequestRecord {
|
||||
const TABLE_NAME: &str = "received_authorization_request";
|
||||
|
||||
const SELECT_COLUMNS: &[&str] = &[
|
||||
"id",
|
||||
"authorization_request_id",
|
||||
"peer_note"
|
||||
];
|
||||
|
||||
const INSERT_COLUMNS: &[&str] = &[
|
||||
"authorization_request_id",
|
||||
"peer_note"
|
||||
];
|
||||
|
||||
type InsertParams<'a> = (&'a u32, &'a str);
|
||||
|
||||
fn from_row(row: &rusqlite::Row<'_>) -> Result<Self, rusqlite::Error> {
|
||||
Ok(Self {
|
||||
id: row.get(0)?,
|
||||
authorization_request_id: row.get(1)?,
|
||||
peer_note: row.get(2)?
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
core/src/data/local/authorization_request/sent.rs (Normal file, 39 lines)
|
|
@ -0,0 +1,39 @@
|
|||
use caretta_id::SingleId;
|
||||
use chrono::{DateTime, Local, NaiveDateTime};
|
||||
use iroh::{NodeId, PublicKey};
|
||||
use rusqlite::types::FromSqlError;
|
||||
|
||||
use crate::{data::local::LocalRecord, global::LOCAL_DATABASE_CONNECTION};
|
||||
|
||||
/// Request of node authentication.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SentAuthorizationRequestRecord {
|
||||
id: u32,
|
||||
authorization_request_id: u32,
|
||||
passcode: String,
|
||||
}
|
||||
|
||||
impl LocalRecord for SentAuthorizationRequestRecord {
|
||||
|
||||
const TABLE_NAME: &str = "sent_authorization_request";
|
||||
const SELECT_COLUMNS: &[&str] = &[
|
||||
"id",
|
||||
"authorization_request_id",
|
||||
"passcode",
|
||||
];
|
||||
const INSERT_COLUMNS: &[&str] = &[
|
||||
"authorization_request_id",
|
||||
"passcode"
|
||||
];
|
||||
|
||||
type InsertParams<'a> = (&'a u32, &'a str);
|
||||
|
||||
fn from_row(row: &rusqlite::Row<'_>) -> Result<Self, rusqlite::Error> {
|
||||
Ok(Self{
|
||||
id: row.get(0)?,
|
||||
authorization_request_id: row.get(0)?,
|
||||
passcode: row.get(2)?
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -6,8 +6,11 @@ use tracing::{event, Level};
|
|||
pub fn migrate(con: &mut Connection) -> Result<(), Error>{
|
||||
let version: u32 = con.pragma_query_value(None,"user_version", |row| row.get(0)).expect("Failed to get user_version");
|
||||
if version < 1 {
|
||||
let tx = con.transaction()?;
|
||||
event!(Level::INFO, "Migrate local db to version 1");
|
||||
v1::migrate(con)?;
|
||||
v1::migrate(&tx)?;
|
||||
tx.pragma_update(None, "user_version", 1)?;
|
||||
tx.commit()?;
|
||||
event!(Level::INFO, "Migration done.");
|
||||
}
|
||||
Ok(())
|
||||
|
|
|
|||
|
|
@ -1,38 +1,45 @@
|
|||
use rusqlite::{Error, Connection};
|
||||
use rusqlite::{Connection, Error, Transaction};
|
||||
|
||||
pub fn migrate(con: &mut Connection) -> Result<(), Error>{
|
||||
let tx = con.transaction()?;
|
||||
pub fn migrate(tx: &Transaction) -> Result<(), Error>{
|
||||
tx.execute_batch(
|
||||
"BEGIN;
|
||||
CREATE TABLE peer (
|
||||
"CREATE TABLE remote_node (
|
||||
id INTEGER PRIMARY KEY,
|
||||
public_id INTEGER NOT NULL UNIQUE,
|
||||
public_key BLOB UNIQUE NOT NULL
|
||||
);
|
||||
CREATE TABLE authorization_request (
|
||||
id INTEGER PRIMARY KEY,
|
||||
libp2p_peer_id TEXT UNIQUE NOT NULL,
|
||||
uuid BLOB NOT NULL UNIQUE,
|
||||
public_id INTEGER NOT NULL UNIQUE,
|
||||
remote_node_id INTEGER NOT NULL UNIQUE,
|
||||
created_at TEXT NOT NULL,
|
||||
updated_at TEXT NOT NULL,
|
||||
closed_at TEXT,
|
||||
FOREIGN KEY(remote_node_id) REFERENCES remote_node(id)
|
||||
);
|
||||
CREATE INDEX idx_peer_created_at ON peer(created_at);
|
||||
CREATE INDEX idx_peer_updated_at ON peer(updated_at);
|
||||
CREATE TABLE address (
|
||||
id INTEGER PRIMARY KEY,
|
||||
peer_id INTEGER NOT NULL,
|
||||
multiaddr TEXT UNIQUE NOT NULL,
|
||||
created_at TEXT NOT NULL,
|
||||
updated_at TEXT NOT NULL,
|
||||
protocol TEXT NOT NULL,
|
||||
FOREIGN KEY(peer_id) REFERENCES peer(id)
|
||||
CREATE TABLE received_authorization_request (
|
||||
id INTEGER PRIMARY KEY,
|
||||
authorization_request_id INTEGER NOT NULL UNIQUE,
|
||||
node_note TEXT,
|
||||
FOREIGN KEY(authorization_request_id) REFERENCES authorization_request(id)
|
||||
);
|
||||
CREATE INDEX idx_address_created_at ON address(created_at);
|
||||
CREATE INDEX idx_address_updated_at ON address(updated_at);
|
||||
CREATE TABLE authorized_peer (
|
||||
id INTEGER PRIMARY KEY,
|
||||
peer_id INTEGER NOT NULL UNIQUE,
|
||||
synced_at TEXT,
|
||||
created_at TEXT NOT NULL,
|
||||
updated_at TEXT NOT NULL,
|
||||
FOREIGN KEY(peer_id) REFERENCES peer(id)
|
||||
)",
|
||||
CREATE TABLE sent_authorization_request (
|
||||
id INTEGER PRIMARY KEY,
|
||||
authorization_request_id INTEGER NOT NULL UNIQUE,
|
||||
passcode TEXT NOT NULL,
|
||||
FOREIGN KEY(authorization_request_id) REFERENCES authorization_request(id)
|
||||
);
|
||||
CREATE TABLE authorized_remote_node (
|
||||
id INTEGER PRIMARY KEY,
|
||||
uuid BLOB UNIQUE NOT NULL,
|
||||
public_id INTEGER NOT NULL UNIQUE,
|
||||
public_key BLOB NOT NULL UNIQUE,
|
||||
note TEXT NOT NULL,
|
||||
last_synced_at TEXT,
|
||||
last_sent_version_vector BLOB,
|
||||
created_at TEXT NOT NULL,
|
||||
updated_at TEXT NOT NULL
|
||||
);",
|
||||
)?;
|
||||
tx.pragma_update(None, "user_version", 1)?;
|
||||
tx.commit()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -1,56 +1,121 @@
|
|||
// mod authorization_request;
|
||||
mod remote_node;
|
||||
pub use remote_node::RemoteNodeRecord;
|
||||
pub mod migration;
|
||||
|
||||
use std::{cell::OnceCell, path::Path, sync::{LazyLock, OnceLock}};
|
||||
use std::{cell::OnceCell, convert::Infallible, iter::Map, path::Path, sync::{LazyLock, OnceLock}};
|
||||
|
||||
use migration::migrate;
|
||||
use rusqlite::{ffi::Error, Connection};
|
||||
use rusqlite::{ffi::Error, params, types::FromSql, Connection, MappedRows, OptionalExtension, Params, Row, ToSql};
|
||||
|
||||
use crate::{config::StorageConfig, global::CONFIG};
|
||||
use crate::{config::StorageConfig, global::{CONFIG, LOCAL_DATABASE_CONNECTION}};
|
||||
|
||||
static INITIALIZE_PARENT_DIRECTORY_RESULT: OnceLock<()> = OnceLock::new();
|
||||
// pub use authorization_request::*;
|
||||
pub type LocalRecordError = rusqlite::Error;
|
||||
|
||||
static MIGRATE_RESULT: OnceLock<()> = OnceLock::new();
|
||||
|
||||
fn initialize_parent_directory<P>(path: &P)
|
||||
where
|
||||
P: AsRef<Path>,
|
||||
{
|
||||
*INITIALIZE_PARENT_DIRECTORY_RESULT.get_or_init(|| {
|
||||
let path2: &Path = path.as_ref();
|
||||
if let Some(x) = path2.parent() {
|
||||
if !x.exists() {
|
||||
std::fs::create_dir_all(x).expect("Parent directory of the local database must be created.");
|
||||
}
|
||||
/// A struct of the id for a local database record.
|
||||
pub type LocalRecordId = u32;
|
||||
|
||||
/// A struct for a record without an id, before it is inserted.
|
||||
pub type NoLocalRecordId = rusqlite::types::Null;
|
||||
|
||||
|
||||
|
||||
/// An id struct
/// Model trait for local database data.
/// Uses LOCAL_DATABASE_CONNECTION for the database connection.
|
||||
pub trait LocalRecord: Sized {
|
||||
const TABLE_NAME: &str;
|
||||
const COLUMNS: &[&str];
|
||||
|
||||
/// Tuple form of the record.
|
||||
/// the order of field must be same as COLUMNS.
|
||||
type RowValues;
|
||||
}
|
||||
pub trait SelectableLocalRecord: LocalRecord<RowValues: TryInto<Self>> {
|
||||
|
||||
const SELECT_STATEMENT: LazyLock<String> = LazyLock::new(|| {
|
||||
String::from("SELECT ") + &Self::COLUMNS.join(", ") + " FROM " + Self::TABLE_NAME
|
||||
});
|
||||
|
||||
const SELECT_PLACEHOLDER: LazyLock<String> = LazyLock::new(|| {
|
||||
let mut result : Vec<String> = Vec::new();
|
||||
for i in 0..Self::COLUMNS.len() {
|
||||
result.push(String::from("?") + &(i+1).to_string());
|
||||
}
|
||||
})
|
||||
}
|
||||
result.join(", ")
|
||||
});
|
||||
|
||||
fn migrate_once(conn: &mut Connection) -> () {
|
||||
*MIGRATE_RESULT.get_or_init(|| {
|
||||
migrate(conn).expect("Local database migration should be done correctly")
|
||||
})
|
||||
|
||||
}
|
||||
pub trait LocalDatabaseConnection: Sized {
|
||||
fn from_path<P>(path: &P) -> Self
|
||||
where
|
||||
P: AsRef<Path>;
|
||||
fn from_storage_config(config: &StorageConfig) -> Self {
|
||||
Self::from_path(&config.get_local_database_path())
|
||||
}
|
||||
fn from_global_storage_config() -> Self {
|
||||
Self::from_storage_config(&CONFIG.get_unchecked().storage)
|
||||
}
|
||||
}
|
||||
|
||||
impl LocalDatabaseConnection for Connection {
|
||||
fn from_path<P>(path: &P) -> Self
|
||||
where
|
||||
P: AsRef<Path>
|
||||
|
||||
fn get_one_where<P>(where_statement: &str, params: P) -> Result<Self, rusqlite::Error>
|
||||
where P: Params
|
||||
{
|
||||
initialize_parent_directory(path);
|
||||
let mut conn = Connection::open(path).expect("local database connection must be opened without error");
|
||||
migrate_once(&mut conn);
|
||||
conn
|
||||
let connection = LOCAL_DATABASE_CONNECTION.get_unchecked();
|
||||
Ok(connection.query_row(
|
||||
&(String::new() + &Self::SELECT_STATEMENT + " " + where_statement),
|
||||
params,
|
||||
Self::from_row
|
||||
)?)
|
||||
}
|
||||
|
||||
fn get_one_by_field<T>(field_name: &str, field_value: T) -> Result<Self, rusqlite::Error>
|
||||
where
|
||||
T: ToSql
|
||||
{
|
||||
let connection = LOCAL_DATABASE_CONNECTION.get_unchecked();
|
||||
Ok(connection.query_row(
|
||||
&([&Self::SELECT_STATEMENT, "FROM", Self::TABLE_NAME, "WHERE", field_name,"= ?1"].join(" ")),
|
||||
params![field_value],
|
||||
Self::from_row
|
||||
)?)
|
||||
}
|
||||
fn get_one_by_id(id: u32) -> Result<Self, rusqlite::Error> {
|
||||
Self::get_one_by_field("id", id )
|
||||
}
|
||||
fn from_row(row: &Row<'_>) -> Result<Self, rusqlite::Error>;
|
||||
fn get_all() -> Result<Vec<Self>, rusqlite::Error> {
|
||||
let connection = LOCAL_DATABASE_CONNECTION.get_unchecked();
|
||||
let mut stmt = connection.prepare(&("SELECT ".to_string() + &Self::COLUMNS.join(", ") + " FROM " + Self::TABLE_NAME))?;
|
||||
let rows = stmt.query_map(
|
||||
[],
|
||||
Self::from_row
|
||||
)?;
|
||||
let mut result= Vec::new();
|
||||
for row in rows {
|
||||
result.push(row?);
|
||||
}
|
||||
Ok(result)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
pub trait InsertableLocalRecord: LocalRecord<RowValues: From<Self> + Params> {
|
||||
type LocalRecord: Sized + SelectableLocalRecord;
|
||||
|
||||
/// Place holder for insertion.
|
||||
/// Generated from Columns
|
||||
const INSERT_PLACEHOLDER: LazyLock<String> = LazyLock::new(|| {
|
||||
let mut result : Vec<String> = Vec::new();
|
||||
for i in 0..Self::COLUMNS.len() {
|
||||
result.push(String::from("?") + &(i+1).to_string());
|
||||
}
|
||||
result.join(", ")
|
||||
});
|
||||
/// Insert and get the inserted record.
|
||||
fn insert(self) -> Result<Self::LocalRecord, rusqlite::Error>{
|
||||
let params= Self::RowValues::from(self);
|
||||
let connection = LOCAL_DATABASE_CONNECTION.get_unchecked();
|
||||
|
||||
Ok(connection.query_row(
|
||||
&[
|
||||
"INSERT INTO ", Self::TABLE_NAME, "(" , &Self::COLUMNS.join(", "), ")",
|
||||
"VALUES (" , &*Self::INSERT_PLACEHOLDER , ")",
|
||||
"RETURNING", &Self::COLUMNS.join(", ")
|
||||
].join(" "),
|
||||
params,
|
||||
Self::LocalRecord::from_row
|
||||
)?)
|
||||
}
|
||||
}
|
||||
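A minimal usage sketch, not part of the diff, showing how the trait family above composes for a concrete record type (RemoteNodeRecord from the next file); it assumes LOCAL_DATABASE_CONNECTION has already been initialized and that the remote_node table exists in the migrations.

// Sketch only: `lookup_example` is a hypothetical helper, not crate API.
fn lookup_example(public_key: &iroh::PublicKey) -> Result<(), rusqlite::Error> {
    use crate::data::local::{LocalRecordId, RemoteNodeRecord, SelectableLocalRecord};
    // INSERT ... RETURNING hands back the row together with its assigned id.
    let record = RemoteNodeRecord::get_or_insert_by_public_key(public_key)?;
    // Generic helpers provided by SelectableLocalRecord:
    let by_id = RemoteNodeRecord::<LocalRecordId>::get_one_by_id(record.id)?;
    assert_eq!(record, by_id);
    let all = RemoteNodeRecord::<LocalRecordId>::get_all()?;
    assert!(all.contains(&record));
    Ok(())
}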
120 core/src/data/local/remote_node.rs Normal file
@@ -0,0 +1,120 @@
//! Structs about the cached remote_node.

use std::os::unix::raw::time_t;

use tripod_id::Double;
use chrono::{DateTime, Local, NaiveDateTime};
use iroh::{NodeId, PublicKey};
use rusqlite::{params, types::{FromSqlError, Null}, Connection};
use uuid::Uuid;

use crate::{data::local::{self, InsertableLocalRecord, LocalRecord, LocalRecordId, NoLocalRecordId, SelectableLocalRecord}, global::LOCAL_DATABASE_CONNECTION};

/// Remote node information cached in the local database.
///
/// - Currently this only contains the local id and the iroh public key (= node id).
/// - It is a junction table that lets a caretta-id address items in the UI, especially on the CLI.
/// - The actual remote node information is managed by the iroh endpoint and is not stored in this model.
/// - Once a remote node is authorized it is assigned a global (= synced) id as an authorized_remote_node, so this local id essentially targets unauthorized remote nodes.
#[derive(Clone, Debug, PartialEq)]
pub struct RemoteNodeRecord<T> {
    /// Serial primary key.
    pub id: T,

    /// Public tripod id of the remote node.
    /// This id is used only by the node itself and is not synced, so another node will have a different local remote_node id even if its public_key is the same.
    pub public_id: Double,

    /// Iroh public key.
    pub public_key: PublicKey,
}

impl RemoteNodeRecord<LocalRecordId> {
    pub fn get_or_insert_by_public_key(public_key: &PublicKey) -> Result<Self, rusqlite::Error> {
        match Self::get_by_public_key(public_key) {
            Ok(x) => Ok(x),
            Err(rusqlite::Error::QueryReturnedNoRows) => {
                let new = RemoteNodeRecord {
                    id: NoLocalRecordId {},
                    public_id: rand::random(),
                    public_key: public_key.clone()
                };
                Ok(new.insert()?)
            },
            Err(e) => Err(e)
        }
    }
    pub fn get_by_public_id(public_id: &Double) -> Result<Self, rusqlite::Error> {
        Self::get_one_where("WHERE public_id = ?1", (public_id,))
    }
    pub fn get_by_public_key(public_key: &PublicKey) -> Result<Self, rusqlite::Error> {
        Self::get_one_where("WHERE public_key = ?1", (public_key.as_bytes(),))
    }
}

impl<T> LocalRecord for RemoteNodeRecord<T> {
    const TABLE_NAME: &str = "remote_node";
    const COLUMNS: &[&str] = &[
        "id",
        "public_id",
        "public_key"
    ];

    type RowValues = (T, Double, [u8; 32]);
}

impl SelectableLocalRecord for RemoteNodeRecord<LocalRecordId> {
    fn from_row(row: &rusqlite::Row<'_>) -> Result<Self, rusqlite::Error> {
        Ok(Self {
            id: row.get(0)?,
            public_id: row.get(1)?,
            public_key: PublicKey::from_bytes(&row.get(2)?).map_err(|e| FromSqlError::Other(Box::new(e)))?
        })
    }
}

impl TryFrom<(LocalRecordId, Double, [u8; 32])> for RemoteNodeRecord<LocalRecordId> {
    type Error = rusqlite::Error;
    fn try_from(value: (LocalRecordId, Double, [u8; 32])) -> Result<Self, Self::Error> {
        Ok(Self {
            id: value.0,
            public_id: value.1,
            public_key: PublicKey::from_bytes(&value.2).map_err(|x| FromSqlError::Other(Box::new(x)))?
        })
    }
}

impl InsertableLocalRecord for RemoteNodeRecord<NoLocalRecordId> {
    type LocalRecord = RemoteNodeRecord<LocalRecordId>;
}

impl From<RemoteNodeRecord<NoLocalRecordId>> for (NoLocalRecordId, Double, [u8; 32]) {
    fn from(value: RemoteNodeRecord<NoLocalRecordId>) -> Self {
        (value.id, value.public_id, value.public_key.as_bytes().to_owned())
    }
}

#[cfg(test)]
mod tests {
    use iroh::SecretKey;

    use crate::tests::TEST_CONFIG;

    use super::*;

    #[test]
    fn insert_get_remote_node_record() {
        LOCAL_DATABASE_CONNECTION.get_or_init(&TEST_CONFIG.storage.get_local_database_path());
        let key = SecretKey::generate(&mut rand::rngs::OsRng);
        let pubkey = key.public();
        let record = RemoteNodeRecord::get_or_insert_by_public_key(&pubkey).unwrap();
        assert_eq!(record, RemoteNodeRecord::get_by_public_id(&record.public_id).unwrap());
        assert_eq!(record, RemoteNodeRecord::get_by_public_key(&record.public_key).unwrap());
    }
}
@@ -1,4 +1,7 @@
use std::ffi::OsString;
use std::{array::TryFromSliceError, ffi::OsString};
use tonic::Status;

use crate::proto::ProtoDeserializeError;

#[derive(thiserror::Error, Debug)]
pub enum Error {
@@ -10,35 +13,51 @@ pub enum Error {
    CiborSerialize(#[from] ciborium::ser::Error<std::io::Error>),
    #[error("Config error: {0}")]
    Config(#[from] crate::config::error::ConfigError),
    #[error("Dial Error: {0}")]
    Dial(#[from] libp2p::swarm::DialError),
    #[error("Decoding identity error: {0}")]
    IdentityDecoding(#[from] libp2p::identity::DecodingError),
    #[error("Infallible: {0}")]
    Infallible(#[from] std::convert::Infallible),
    #[error("IO Error: {0}")]
    Io(#[from] std::io::Error),
    #[error("Iroh bind error: {0}")]
    IrohBind(#[from] iroh::endpoint::BindError),
    #[error("mandatory config `{0}` is missing")]
    MissingConfig(&'static str),
    #[error("Multiaddr error: {0}")]
    Multiaddr(#[from] libp2p::multiaddr::Error),
    #[error("Noise error: {0}")]
    Noise(#[from] libp2p::noise::Error),
    #[error("Parse OsString error: {0:?}")]
    OsStringConvert(std::ffi::OsString),
    #[cfg(feature="cli")]
    #[error("Parse args error: {0}")]
    ParseCommand(#[from] clap::Error),
    #[error("Signature error: {0}")]
    Signature(#[from] ed25519_dalek::SignatureError),
    #[error("slice parse error: {0}")]
    SliceTryFrom(#[from] TryFromSliceError),
    #[error("toml deserialization error: {0}")]
    TomlDe(#[from] toml::de::Error),
    #[error("toml serialization error: {0}")]
    TomlSer(#[from] toml::ser::Error),
    #[error("Transport error: {0}")]
    Transport(#[from] libp2p::TransportError<std::io::Error>),
    #[error("protobuf serialization error: {0}")]
    ProtoSerialize(#[from] crate::proto::ProtoSerializeError),
    #[error("protobuf deserialization error: {0}")]
    ProtoDeserialize(#[from] crate::proto::ProtoDeserializeError),
    #[error("Local record error: {0}")]
    LocalRecord(#[from] crate::data::local::LocalRecordError),
    #[error("Tripod id error: {0}")]
    TripodId(#[from] tripod_id::Error),
}

impl From<std::ffi::OsString> for Error {
    fn from(s: OsString) -> Error {
        Self::OsStringConvert(s)
    }
}

impl From<Error> for Status {
    fn from(value: Error) -> Self {
        match value {
            Error::ProtoDeserialize(x) => match x {
                ProtoDeserializeError::MissingField(x) => Self::invalid_argument(format!("{} is required", x)),
                _ => Status::unimplemented("Unimplemented protobuf deserialize error status"),
            },
            _ => Status::unimplemented("Unimplemented error status"),
        }
    }
}
@@ -2,7 +2,7 @@
use tempfile::TempDir;
use tokio::sync::OnceCell;

use crate::{config::{Config, ConfigError, PartialP2pConfig, PartialRpcConfig, PartialStorageConfig, StorageConfig}, error::Error};
use crate::{config::{Config, ConfigError, PartialIrohConfig, PartialRpcConfig, PartialStorageConfig, StorageConfig}, error::Error};

pub static CONFIG: GlobalConfig = GlobalConfig::const_new();
pub struct GlobalConfig {
27 core/src/global/iroh_endpoint.rs Normal file
@@ -0,0 +1,27 @@
use std::sync::OnceLock;

use iroh::Endpoint;

pub static IROH_ENDPOINT: GlobalIrohEndpoint = GlobalIrohEndpoint::const_new();

pub struct GlobalIrohEndpoint {
    inner: OnceLock<Endpoint>
}

impl GlobalIrohEndpoint {
    const fn const_new() -> Self {
        Self {
            inner: OnceLock::new()
        }
    }
    pub fn get_or_init(&self, endpoint: &Endpoint) -> Endpoint {
        self.inner.get_or_init(|| endpoint.clone()).clone()
    }
    pub fn get(&self) -> Option<Endpoint> {
        self.inner.get().map(|x| x.clone())
    }
    pub fn get_unchecked(&self) -> Endpoint {
        self.get().expect("Global Iroh Endpoint must be initialized before use")
    }
}
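A short initialization sketch, not part of the diff, for the global endpoint. Endpoint::builder().bind() is the usual iroh entry point; the function name is hypothetical and the error type is assumed to be iroh::endpoint::BindError, as used elsewhere in this diff.

// Sketch only: assumes an async runtime is already running.
async fn init_iroh_example() -> Result<(), iroh::endpoint::BindError> {
    use crate::global::IROH_ENDPOINT;
    let endpoint = iroh::Endpoint::builder().bind().await?;
    // The first caller stores the endpoint; later callers receive clones of it.
    let shared = IROH_ENDPOINT.get_or_init(&endpoint);
    assert_eq!(shared.node_id(), IROH_ENDPOINT.get_unchecked().node_id());
    Ok(())
}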
49 core/src/global/local_database_connection.rs Normal file
@@ -0,0 +1,49 @@
use std::{fs::create_dir_all, path::{Path, PathBuf}, sync::OnceLock};

use rusqlite::Connection;

use crate::{data::local::migration::migrate, error::Error};

pub static LOCAL_DATABASE_CONNECTION: GlobalLocalDatabaseConnection = GlobalLocalDatabaseConnection::const_new();

pub struct GlobalLocalDatabaseConnection {
    path: OnceLock<PathBuf>
}

fn path_to_connection_or_panic<P>(path: &P) -> Connection
where
    P: AsRef<Path>
{
    Connection::open(path.as_ref()).expect("Failed to open database connection for local data")
}

impl GlobalLocalDatabaseConnection {
    const fn const_new() -> Self {
        Self {
            path: OnceLock::new()
        }
    }

    pub fn get_or_init<P>(&self, path: &P) -> Connection
    where
        P: AsRef<Path>,
    {
        path_to_connection_or_panic(self.path.get_or_init(|| {
            let path = path.as_ref();
            let parent = path.parent().expect("Database path should have parent directory");
            create_dir_all(parent).expect("Failed to create parent directory of database");
            let mut conn = path_to_connection_or_panic(&path);
            migrate(&mut conn).expect("Local database migration should be done correctly");
            path.to_path_buf()
        }))
    }

    pub fn get(&self) -> Option<Connection> {
        self.path.get().map(|path| {
            path_to_connection_or_panic(path)
        })
    }
    pub fn get_unchecked(&self) -> Connection {
        self.get().expect("Global database for local data must be initialized before use")
    }
}
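A usage sketch, not part of the diff: initialize the global connection from the storage config, then query it. The helper name is hypothetical; note that get()/get_unchecked() open a fresh rusqlite::Connection on every call, so only the path and the one-time migration are shared state.

// Sketch only.
fn local_db_example(storage: &crate::config::StorageConfig) -> Result<(), rusqlite::Error> {
    use crate::global::LOCAL_DATABASE_CONNECTION;
    // The first call creates the parent directory and applies migrations.
    let conn = LOCAL_DATABASE_CONNECTION.get_or_init(&storage.get_local_database_path());
    let count: u32 = conn.query_row("SELECT count(*) FROM remote_node", [], |row| row.get(0))?;
    println!("cached remote nodes: {count}");
    Ok(())
}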
@@ -1,20 +1,22 @@
use std::{any::type_name, collections::HashMap, net::{IpAddr, Ipv4Addr}, path::{Path, PathBuf}, sync::LazyLock};

use crate::{config::{P2pConfig, PartialP2pConfig, StorageConfig}, error::Error};
use libp2p::{swarm::SwarmEvent, Multiaddr, PeerId};
use crate::{config::StorageConfig, error::Error};
use tokio::sync::{OnceCell, RwLock, RwLockReadGuard, RwLockWriteGuard};

mod config;
mod iroh_endpoint;
mod local_database_connection;

pub use config::*;
pub use iroh_endpoint::*;
pub use local_database_connection::*;
use uuid::{ContextV7, Timestamp, Uuid};

pub fn generate_uuid() -> Uuid {
    Uuid::new_v7(Timestamp::now(ContextV7::new()))
}

pub static DEFAULT_LISTEN_IPS: &[IpAddr] = &[IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))];

fn uninitialized_message<T>(var: T) -> String {
    format!("{} is uninitialized!", &stringify!(var))
}
@@ -2,9 +2,7 @@ pub mod config;
pub mod data;
pub mod error;
pub mod global;
pub mod p2p;
pub mod proto;
pub mod rpc;
#[cfg(any(test, feature="test"))]
pub mod tests;
pub mod utils;
@@ -1,4 +0,0 @@
#[derive(Debug, thiserror::Error)]
pub enum P2pError {

}
@@ -1,104 +0,0 @@
pub mod error;
use chrono::Local;
use libp2p::{identity::Keypair, mdns, ping, swarm, Multiaddr, PeerId};
use sea_orm::{prelude::DateTimeUtc, ActiveModelTrait, ActiveValue::Set, ColumnTrait, EntityTrait, ModelTrait, QueryFilter};
use tracing::{event, Level};

use crate::{cache::entity::{CachedPeerActiveModel, CachedAddressActiveModel, CachedAddressColumn, CachedAddressEntity, CachedAddressModel, CachedPeerColumn, CachedPeerEntity, CachedPeerModel}, data::value::{MultiaddrValue, PeerIdValue}, error::Error, global::DATABASE_CONNECTIONS};

#[derive(swarm::NetworkBehaviour)]
#[behaviour(to_swarm = "Event")]
pub struct Behaviour {
    pub mdns: mdns::tokio::Behaviour,
    pub ping: ping::Behaviour,
}

impl TryFrom<&Keypair> for Behaviour {
    type Error = Error;
    fn try_from(keypair: &Keypair) -> Result<Self, Error> {
        Ok(Self {
            mdns: mdns::tokio::Behaviour::new(
                mdns::Config::default(),
                keypair.public().into(),
            )?,
            ping: libp2p::ping::Behaviour::new(ping::Config::new()),
        })
    }
}

#[derive(Debug)]
pub enum Event {
    Mdns(mdns::Event),
    Ping(ping::Event),
}

impl Event {
    pub async fn run(&self) {
        match self {
            Self::Mdns(x) => {
                match x {
                    mdns::Event::Discovered(e) => {
                        for peer in e.iter() {
                            event!(Level::TRACE, "Peer discovered via mdns: {}, {}", &peer.0, &peer.1);
                            match try_get_or_insert_cached_peer(&peer.0, &peer.1).await {
                                Ok(_) => {},
                                Err(e) => {
                                    event!(Level::WARN, "{:?}", e);
                                }
                            };
                        }
                    },
                    _ => {},
                }
            },
            _ => {}
        }
    }
}
impl From<mdns::Event> for Event {
    fn from(event: mdns::Event) -> Self {
        Self::Mdns(event)
    }
}
impl From<ping::Event> for Event {
    fn from(event: ping::Event) -> Self {
        Self::Ping(event)
    }
}

async fn try_get_or_insert_cached_peer(peer_id: &PeerId, peer_addr: &Multiaddr) -> Result<(CachedPeerModel, CachedAddressModel), Error> {
    match (
        CachedPeerEntity::find().filter(CachedPeerColumn::PeerId.eq(PeerIdValue::from(peer_id.clone()))).one(DATABASE_CONNECTIONS.get_cache_unchecked()).await?,
        CachedAddressEntity::find().filter(CachedAddressColumn::Multiaddress.eq(MultiaddrValue::from(peer_addr.clone()))).one(DATABASE_CONNECTIONS.get_cache_unchecked()).await?,
    ) {
        (Some(x), Some(y)) => {
            if x.id == y.cached_peer_id {
                event!(Level::TRACE, "Known peer: {}, {}", peer_id, peer_addr);
                let mut addr: CachedAddressActiveModel = y.into();
                addr.updated_at = Set(Local::now().to_utc());
                let updated = addr.update(DATABASE_CONNECTIONS.get_cache_unchecked()).await?;
                Ok((x, updated))
            } else {
                y.delete(DATABASE_CONNECTIONS.get_cache().expect("Cache database should be initialized beforehand!")).await?;
                Ok((x.clone(), CachedAddressActiveModel::new(x.id, peer_addr.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?))
            }
        }
        (Some(x), None) => {
            event!(Level::INFO, "New address {} for {}", peer_addr, peer_id);
            Ok((x.clone(), CachedAddressActiveModel::new(x.id, peer_addr.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?))
        },
        (None, x) => {
            event!(Level::INFO, "Add new peer: {}", peer_id);
            let inserted = CachedPeerActiveModel::new(peer_id.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?;
            if let Some(y) = x {
                event!(Level::INFO, "Remove {} from {}", peer_addr, peer_id);
                y.delete(DATABASE_CONNECTIONS.get_cache_unchecked()).await?;
            };
            event!(Level::INFO, "Add address {} to {}", peer_addr, peer_id);
            Ok((inserted.clone(), CachedAddressActiveModel::new(inserted.id, peer_addr.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?))
        },
    }
}
27 core/src/proto/authorization_request.rs Normal file
@@ -0,0 +1,27 @@
use std::pin::Pin;

use futures::Stream;
use tonic::{Request, Response, Streaming};

tonic::include_proto!("caretta_sync.authorization_request");
pub struct AuthorizationRequestService {}

#[tonic::async_trait]
impl authorization_request_server::AuthorizationRequest for AuthorizationRequestService {
    type ListStream = Pin<Box<dyn Stream<Item = Result<ListResponse, tonic::Status>> + Send>>;
    async fn send(&self, request: Request<SendRequest>) -> Result<Response<SendResponse>, tonic::Status> {
        todo!()
    }
    async fn accept(&self, request: Request<AcceptRequest>) -> Result<Response<AcceptResponse>, tonic::Status> {
        todo!()
    }
    async fn reject(&self, request: Request<RejectRequest>) -> Result<Response<RejectResponse>, tonic::Status> {
        todo!()
    }
    async fn info(&self, request: Request<InfoRequest>) -> Result<Response<InfoResponse>, tonic::Status> {
        todo!()
    }
    async fn list(&self, request: Request<Streaming<ListRequest>>) -> Result<Response<Self::ListStream>, tonic::Status> {
        todo!()
    }
}
19 core/src/proto/authorized_node.rs Normal file
@@ -0,0 +1,19 @@
use std::pin::Pin;

use futures::Stream;
use tonic::{Request, Response, Streaming, Status};

tonic::include_proto!("caretta_sync.authorized_node");
pub struct AuthorizedNodeService {}

#[tonic::async_trait]
impl authorized_node_server::AuthorizedNode for AuthorizedNodeService {
    type ListStream = Pin<Box<dyn Stream<Item = Result<ListResponse, tonic::Status>> + Send>>;

    async fn info(&self, request: Request<InfoRequest>) -> Result<Response<InfoResponse>, tonic::Status> {
        todo!()
    }
    async fn list(&self, request: Request<Streaming<ListRequest>>) -> Result<Response<Self::ListStream>, tonic::Status> {
        todo!()
    }
}
@@ -1,16 +0,0 @@
use libp2p::Multiaddr;

use crate::cache::entity::CachedAddressModel;
use crate::utils::utc_to_timestamp;
use crate::proto::CachedAddressMessage;

impl From<&CachedAddressModel> for CachedAddressMessage {
    fn from(a: &CachedAddressModel) -> Self {
        Self {
            number: a.id,
            created_at: Some(utc_to_timestamp(&a.created_at)),
            updated_at: Some(utc_to_timestamp(&a.updated_at)),
            multiaddress: Multiaddr::from(a.multiaddress.clone()).to_string(),
        }
    }
}
@@ -1,14 +0,0 @@
use crate::{cache::entity::{CachedAddressModel, CachedPeerModel}, proto::{CachedAddressMessage, CachedPeerMessage}, utils::utc_to_timestamp};

impl From<(&CachedPeerModel, &Vec<CachedAddressModel>)> for CachedPeerMessage {
    fn from(source: (&CachedPeerModel, &Vec<CachedAddressModel>)) -> Self {
        let (peer, addresses) = source;

        Self {
            number: peer.id,
            peer_id: peer.peer_id.to_string(),
            created_at: Some(utc_to_timestamp(&peer.created_at)),
            addresses: addresses.iter().map(|x| CachedAddressMessage::from(x)).collect(),
        }
    }
}
54 core/src/proto/common.rs Normal file
@@ -0,0 +1,54 @@
use super::*;
tonic::include_proto!("caretta_sync.common");

use crate::proto::error::{ProtoDeserializeError, ProtoSerializeError};

impl From<uuid::Uuid> for Uuid {
    fn from(value: uuid::Uuid) -> Self {
        let (first_half, second_half) = value.as_u64_pair();
        Self {
            high_bits: first_half,
            low_bits: second_half
        }
    }
}

impl From<Uuid> for uuid::Uuid {
    fn from(value: Uuid) -> Self {
        uuid::Uuid::from_u64_pair(value.high_bits, value.low_bits)
    }
}

impl From<url::Url> for Url {
    fn from(value: url::Url) -> Self {
        todo!()
    }
}

impl TryFrom<Url> for url::Url {
    type Error = ProtoDeserializeError;
    fn try_from(value: Url) -> Result<Self, Self::Error> {
        todo!()
    }
}

#[cfg(test)]
mod tests {
    use std::{net::{self, Ipv4Addr}, u16};

    use super::*;

    fn validate_uuid_conversion(uuid: uuid::Uuid) -> bool {
        let message = Uuid::from(uuid);
        uuid == uuid::Uuid::from(message)
    }

    #[test]
    fn uuid_conversion() {
        assert!(validate_uuid_conversion(uuid::Uuid::nil()));
        assert!(validate_uuid_conversion(uuid::Uuid::max()));
        assert!(validate_uuid_conversion(uuid::Uuid::now_v7()));
    }
}
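A worked sketch, not part of the diff, of the split performed above: the message carries uuid::Uuid::as_u64_pair() verbatim, so the most significant 64 bits land in high_bits and the remaining 64 bits in low_bits.

// Sketch only; `Uuid` here is the generated message type from this module.
fn uuid_pair_example() {
    let id = uuid::Uuid::from_u128(0x0123_4567_89AB_CDEF_0011_2233_4455_6677);
    let msg = Uuid::from(id);
    assert_eq!(msg.high_bits, 0x0123_4567_89AB_CDEF);
    assert_eq!(msg.low_bits, 0x0011_2233_4455_6677);
    assert_eq!(uuid::Uuid::from(msg), id);
}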
17 core/src/proto/convert/direct_addr_info_message.rs Normal file
@@ -0,0 +1,17 @@
use iroh::endpoint::DirectAddrInfo;

use crate::proto::{error::ProtoSerializeError, DirectAddrInfoMessage, LastControlMessage, SourceMessage};

impl TryFrom<DirectAddrInfo> for DirectAddrInfoMessage {
    type Error = ProtoSerializeError;
    fn try_from(value: DirectAddrInfo) -> Result<Self, Self::Error> {
        Ok(DirectAddrInfoMessage {
            addr: value.addr.to_string(),
            latency: value.latency.map(|x| x.try_into()).transpose()?,
            last_control: value.last_control.map(|x| LastControlMessage::try_from(x)).transpose()?,
            last_payload: value.last_payload.map(|x| x.try_into()).transpose()?,
            last_alive: value.last_alive.map(|x| x.try_into()).transpose()?,
            sources: value.sources.into_iter().map(|x| SourceMessage::try_from(x)).collect::<Result<Vec<SourceMessage>, Self::Error>>()?
        })
    }
}
16 core/src/proto/convert/last_control_message.rs Normal file
@@ -0,0 +1,16 @@
use std::time::Duration;

use iroh::endpoint::ControlMsg;
use prost_types::DurationError;

use crate::proto::{error::ProtoSerializeError, LastControlMessage};

impl TryFrom<(Duration, ControlMsg)> for LastControlMessage {
    type Error = ProtoSerializeError;
    fn try_from(value: (Duration, ControlMsg)) -> Result<Self, Self::Error> {
        Ok(LastControlMessage {
            duration: Some(value.0.try_into()?),
            control_msg: value.1.to_string()
        })
    }
}
8 core/src/proto/convert/mod.rs Normal file
@@ -0,0 +1,8 @@
mod direct_addr_info_message;
mod last_control_message;
mod node_id_message;
mod remote_info_iter_request;
mod remote_info_message;
mod remote_info_request;
mod remote_info_response;
mod source_message;
19 core/src/proto/convert/remote_info_message.rs Normal file
@@ -0,0 +1,19 @@
use iroh::endpoint::RemoteInfo;

use crate::{error::Error, proto::{error::ProtoSerializeError, DirectAddrInfoMessage, RemoteInfoMessage}};

impl TryFrom<RemoteInfo> for RemoteInfoMessage {
    type Error = ProtoSerializeError;
    fn try_from(value: RemoteInfo) -> Result<Self, Self::Error> {
        Ok(Self {
            node_id: Some(value.node_id.into()),
            relay_url: value.relay_url.map_or(String::from(""), |x| x.relay_url.to_string()),
            addrs: value.addrs.into_iter()
                .map(|x| DirectAddrInfoMessage::try_from(x))
                .collect::<Result<Vec<DirectAddrInfoMessage>, Self::Error>>()?,
            conn_type: value.conn_type.to_string(),
            latency: value.latency.map(|x| x.try_into()).transpose()?,
            last_used: value.last_used.map(|x| x.try_into()).transpose()?,
        })
    }
}
17 core/src/proto/error.rs Normal file
@@ -0,0 +1,17 @@
#[derive(thiserror::Error, Debug)]
pub enum ProtoSerializeError {
    #[error("Duration parse error: {0}")]
    Duration(#[from] prost_types::DurationError),
}

#[derive(thiserror::Error, Debug)]
pub enum ProtoDeserializeError {
    #[error("Missing field: {0}")]
    MissingField(&'static str),
    #[error("Signature error: {0}")]
    Signature(#[from] ed25519_dalek::SignatureError),
    #[error("slice parse error: {0}")]
    SliceTryFrom(#[from] std::array::TryFromSliceError),
    #[error("Int parse error: {0}")]
    IntTryFrom(#[from] std::num::TryFromIntError),
}
120 core/src/proto/iroh.rs Normal file
@@ -0,0 +1,120 @@
use std::pin::Pin;

use futures::Stream;
use tonic::{async_trait, Request, Response, Status};

use crate::proto::{net::SocketAddr, remote_node_server, ProtoDeserializeError, ProtoSerializeError};

tonic::include_proto!("caretta_sync.iroh");

impl From<iroh::endpoint::ConnectionType> for ConnectionType {
    fn from(value: iroh::endpoint::ConnectionType) -> Self {
        use connection_type::*;
        Self {
            connection_type_value: Some(match value {
                iroh::endpoint::ConnectionType::Direct(socket_addr) => {
                    connection_type::ConnectionTypeValue::Direct(connection_type::Direct { direct_value: Some(SocketAddr::from(socket_addr)) })
                },
                iroh::endpoint::ConnectionType::Relay(relay_url) => {
                    connection_type::ConnectionTypeValue::Relay(connection_type::Relay { relay_value: Some(super::common::Url::from((*relay_url).clone())) })
                },
                iroh::endpoint::ConnectionType::Mixed(socket_addr, relay_url) => {
                    connection_type::ConnectionTypeValue::Mixed(connection_type::Mixed { socket_addr: Some(SocketAddr::from(socket_addr)), relay_url: Some(super::common::Url::from((*relay_url).clone())) })
                },
                iroh::endpoint::ConnectionType::None => {
                    ConnectionTypeValue::None(None {})
                }
            })
        }
    }
}

impl From<iroh::endpoint::ControlMsg> for ControlMsg {
    fn from(value: iroh::endpoint::ControlMsg) -> Self {
        use control_msg::*;
        Self { control_msg_vaue: Some(match value {
            iroh::endpoint::ControlMsg::Ping => ControlMsgVaue::Ping(Ping {}),
            iroh::endpoint::ControlMsg::Pong => ControlMsgVaue::Pong(Pong {}),
            iroh::endpoint::ControlMsg::CallMeMaybe => ControlMsgVaue::CallMeMaybe(CallMeMayBe {}),
        }) }
    }
}

impl TryFrom<iroh::endpoint::DirectAddrInfo> for DirectAddrInfo {
    type Error = ProtoSerializeError;
    fn try_from(value: iroh::endpoint::DirectAddrInfo) -> Result<Self, Self::Error> {
        use direct_addr_info::*;
        let last_control: Option<DurationControlMsg> = if let Some((duration, control_msg)) = value.last_control {
            Some(DurationControlMsg {
                control_msg: Some(control_msg.into()),
                duration: Some(duration.try_into()?)
            })
        } else {
            None
        };
        Ok(Self {
            addr: Some(value.addr.into()),
            latency: value.latency.map(|x| x.try_into()).transpose()?,
            last_control: last_control,
            last_payload: value.last_payload.map(|x| x.try_into()).transpose()?,
            last_alive: value.last_alive.map(|x| x.try_into()).transpose()?,
            sources: value.sources.into_iter().map(|(s, d)| {
                Ok::<SourceDuration, ProtoSerializeError>(SourceDuration {
                    source: Some(s.into()),
                    duration: Some(d.try_into()?)
                })
            }).collect::<Result<Vec<SourceDuration>, ProtoSerializeError>>()?,
        })
    }
}

impl From<iroh::PublicKey> for PublicKey {
    fn from(value: iroh::PublicKey) -> Self {
        Self { key: Vec::from(value.as_bytes()) }
    }
}

impl TryFrom<PublicKey> for iroh::PublicKey {
    type Error = ProtoDeserializeError;
    fn try_from(value: PublicKey) -> Result<Self, Self::Error> {
        let slice: [u8; 32] = value.key[0..32].try_into()?;
        Ok(iroh::PublicKey::from_bytes(&slice)?)
    }
}

impl TryFrom<iroh::endpoint::RemoteInfo> for RemoteInfo {
    type Error = ProtoSerializeError;
    fn try_from(value: iroh::endpoint::RemoteInfo) -> Result<Self, Self::Error> {
        Ok(Self {
            node_id: Some(value.node_id.into()),
            relay_url: value.relay_url.map(|x| {
                Ok::<RelayUrlInfo, ProtoSerializeError>(RelayUrlInfo {
                    relay_url: Some((*x.relay_url).clone().into()),
                    last_alive: x.last_alive.map(|x| x.try_into()).transpose()?,
                    latency: x.latency.map(|x| x.try_into()).transpose()?
                })
            }).transpose()?,
            addrs: value.addrs.into_iter().map(|x| {
                x.try_into()
            }).collect::<Result<Vec<DirectAddrInfo>, ProtoSerializeError>>()?,
            conn_type: Some(value.conn_type.into()),
            latency: value.latency.map(|x| x.try_into()).transpose()?,
            last_used: value.last_used.map(|x| x.try_into()).transpose()?
        })
    }
}

impl From<iroh::endpoint::Source> for Source {
    fn from(value: iroh::endpoint::Source) -> Self {
        use source::*;
        Self {
            source_value: Some(match value {
                iroh::endpoint::Source::Saved => SourceValue::Saved(Saved {}),
                iroh::endpoint::Source::Udp => SourceValue::Udp(Udp {}),
                iroh::endpoint::Source::Relay => SourceValue::Relay(Relay {}),
                iroh::endpoint::Source::App => SourceValue::App(App {}),
                iroh::endpoint::Source::Discovery { name } => SourceValue::Discovery(Discovery { value: name }),
                iroh::endpoint::Source::NamedApp { name } => SourceValue::NamedApp(NamedApp { value: name }),
            })
        }
    }
}
@@ -1,5 +1,12 @@
mod cached_address;
mod cached_peer;
mod authorization_request;
mod authorized_node;
mod remote_node;
mod common;
mod error;
mod iroh;
mod net;

tonic::include_proto!("caretta_sync");

pub use common::*;
pub use error::*;
pub use remote_node::*;
140 core/src/proto/net.rs Normal file
@@ -0,0 +1,140 @@
tonic::include_proto!("caretta_sync.net");

use crate::proto::error::{ProtoDeserializeError, ProtoSerializeError};

type Ipv4AddrMessage = Ipv4Addr;
type Ipv6AddrMessage = Ipv6Addr;
type SocketAddrMessage = SocketAddr;
type SocketAddrV4Message = SocketAddrV4;
type SocketAddrV6Message = SocketAddrV6;

impl From<std::net::SocketAddr> for SocketAddrMessage {
    fn from(value: std::net::SocketAddr) -> Self {
        Self {
            socket_addr_value: Some(match value {
                std::net::SocketAddr::V4(x) => socket_addr::SocketAddrValue::V4(SocketAddrV4Message::from(x)),
                std::net::SocketAddr::V6(x) => socket_addr::SocketAddrValue::V6(SocketAddrV6Message::from(x)),
            })
        }
    }
}

impl TryFrom<SocketAddrMessage> for std::net::SocketAddr {
    type Error = ProtoDeserializeError;
    fn try_from(value: SocketAddrMessage) -> Result<Self, Self::Error> {
        Ok(match value.socket_addr_value.ok_or(Self::Error::MissingField("SocketAddr.socket_addr"))? {
            socket_addr::SocketAddrValue::V4(x) => std::net::SocketAddr::V4(x.try_into()?),
            socket_addr::SocketAddrValue::V6(x) => std::net::SocketAddr::V6(x.try_into()?),
        })
    }
}

impl From<std::net::SocketAddrV4> for SocketAddrV4Message {
    fn from(value: std::net::SocketAddrV4) -> Self {
        Self {
            ip: Some(value.ip().clone().into()),
            port: value.port().into(),
        }
    }
}

impl TryFrom<SocketAddrV4Message> for std::net::SocketAddrV4 {
    type Error = ProtoDeserializeError;
    fn try_from(value: SocketAddrV4Message) -> Result<Self, Self::Error> {
        Ok(Self::new(value.ip.ok_or(ProtoDeserializeError::MissingField("SocketAddrV4.ip"))?.into(), value.port.try_into()?))
    }
}

impl From<std::net::Ipv4Addr> for Ipv4AddrMessage {
    fn from(value: std::net::Ipv4Addr) -> Self {
        Self {
            bits: value.to_bits()
        }
    }
}
impl From<Ipv4AddrMessage> for std::net::Ipv4Addr {
    fn from(value: Ipv4AddrMessage) -> Self {
        Self::from_bits(value.bits)
    }
}

impl From<std::net::SocketAddrV6> for SocketAddrV6Message {
    fn from(value: std::net::SocketAddrV6) -> Self {
        Self {
            ip: Some(value.ip().clone().into()),
            port: value.port().into()
        }
    }
}

impl TryFrom<SocketAddrV6Message> for std::net::SocketAddrV6 {
    type Error = ProtoDeserializeError;
    fn try_from(value: SocketAddrV6Message) -> Result<Self, Self::Error> {
        Ok(Self::new(
            value.ip.ok_or(ProtoDeserializeError::MissingField("SocketAddrV6.ip"))?.into(),
            value.port.try_into()?,
            0,
            0
        ))
    }
}

impl From<std::net::Ipv6Addr> for Ipv6AddrMessage {
    fn from(value: std::net::Ipv6Addr) -> Self {
        let bits = value.to_bits();

        Self {
            high_bits: (bits >> 64) as u64,
            low_bits: bits as u64,
        }
    }
}
impl From<Ipv6AddrMessage> for std::net::Ipv6Addr {
    fn from(value: Ipv6AddrMessage) -> Self {
        Self::from_bits(
            ((value.high_bits as u128) << 64) + (value.low_bits as u128)
        )
    }
}

#[cfg(test)]
mod tests {
    use std::{net::{self, Ipv4Addr}, u16, u8};

    use rand::random;

    use super::*;

    fn validate_socket_addr_conversion(socket_addr: net::SocketAddr) -> Result<bool, ProtoDeserializeError> {
        let message = SocketAddrMessage::from(socket_addr);
        Ok(socket_addr == message.try_into()?)
    }

    #[test]
    fn socket_addr_conversion_ipv4_min() {
        assert!(validate_socket_addr_conversion(net::SocketAddr::new(net::IpAddr::V4(net::Ipv4Addr::new(0, 0, 0, 0)), u16::MIN)).unwrap());
    }
    #[test]
    fn socket_addr_conversion_ipv4_max() {
        assert!(validate_socket_addr_conversion(net::SocketAddr::new(net::IpAddr::V4(net::Ipv4Addr::new(u8::MAX, u8::MAX, u8::MAX, u8::MAX)), u16::MAX)).unwrap());
    }
    #[test]
    fn socket_addr_conversion_ipv4_random() {
        for _ in 0..10 {
            assert!(validate_socket_addr_conversion(net::SocketAddr::new(
                net::IpAddr::V4(net::Ipv4Addr::new(random(), random(), random(), random())),
                random()
            )).unwrap())
        }
    }
    #[test]
    fn socket_addr_conversion_ipv6_min() {
        assert!(validate_socket_addr_conversion(net::SocketAddr::new(net::IpAddr::V6(net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), u16::MIN)).unwrap());
    }
    #[test]
    fn socket_addr_conversion_ipv6_max() {
        assert!(validate_socket_addr_conversion(net::SocketAddr::new(net::IpAddr::V6(net::Ipv6Addr::new(u16::MAX, u16::MAX, u16::MAX, u16::MAX, u16::MAX, u16::MAX, u16::MAX, u16::MAX)), u16::MAX)).unwrap());
    }
}
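A small sketch, not part of the diff, of the Ipv6 packing used above: the 128-bit address is split into two u64 halves and reassembled with the same shift-and-add arithmetic, using std only; the function name is hypothetical.

// Sketch only.
fn ipv6_split_example() {
    let addr: std::net::Ipv6Addr = "2001:db8::1".parse().unwrap();
    let bits = addr.to_bits();
    let (high_bits, low_bits) = ((bits >> 64) as u64, bits as u64);
    let rebuilt = std::net::Ipv6Addr::from_bits(((high_bits as u128) << 64) + (low_bits as u128));
    assert_eq!(addr, rebuilt);
}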
31 core/src/proto/remote_node.rs Normal file
@@ -0,0 +1,31 @@
use std::{pin::Pin, time::Duration};

use futures::{future::Remote, Stream};
use iroh::{endpoint::{DirectAddrInfo, RemoteInfo}, PublicKey};
use tonic::{Request, Response, Status, Streaming};
use tripod_id::Double;

use crate::{data::local::{LocalRecordId, RemoteNodeRecord}, error::Error, global::IROH_ENDPOINT, proto::error::{ProtoDeserializeError, ProtoSerializeError}};

tonic::include_proto!("caretta_sync.remote_node");

pub struct RemoteNodeServer {}

#[tonic::async_trait]
impl remote_node_server::RemoteNode for RemoteNodeServer {
    type ListStream = Pin<Box<dyn Stream<Item = Result<ListResponse, Status>> + Send>>;
    async fn info(&self, request: Request<InfoRequest>) -> Result<Response<InfoResponse>, Status> {
        todo!()
    }
    async fn list(&self, request: Request<Streaming<ListRequest>>)
        -> Result<Response<Self::ListStream>, Status> {
        let iter = IROH_ENDPOINT.get_unchecked().remote_info_iter()
            .map(|x| {
                todo!();
            });
        let stream = futures::stream::iter(iter);
        Ok(Response::new(Box::pin(stream)))
    }
}
34 core/src/proto/server.rs Normal file
@@ -0,0 +1,34 @@
use std::pin::Pin;

use iroh::{endpoint, Endpoint, NodeId};
use tonic::{Response, Request, Status};
use tokio_stream::Stream;

use crate::{global::IROH_ENDPOINT, proto::{error::ProtoDeserializeError, RemoteInfoIterRequest, RemoteInfoMessage, RemoteInfoRequest, RemoteInfoResponse}};

pub struct CarettaSyncServer {}

#[tonic::async_trait]
impl crate::proto::caretta_sync_server::CarettaSync for CarettaSyncServer {
    type RemoteInfoIterStream = Pin<Box<dyn Stream<Item = Result<RemoteInfoResponse, Status>> + Send>>;
    async fn remote_info(&self, request: Request<RemoteInfoRequest>) -> Result<Response<RemoteInfoResponse>, Status> {
        let node_id = NodeId::try_from(request.into_inner().node_id.ok_or(Status::from_error(Box::new(ProtoDeserializeError::MissingField("node_id"))))?).or_else(|e| {
            Err(Status::from_error(Box::new(e)))
        })?;
        let remote_info: Option<RemoteInfoMessage> = IROH_ENDPOINT.get_unchecked().remote_info(node_id).map(|x| x.try_into()).transpose().or_else(|e| {
            Err(Status::from_error(Box::new(e)))
        })?;
        Ok(Response::new(RemoteInfoResponse::from(remote_info)))
    }
    async fn remote_info_iter(&self, _: Request<RemoteInfoIterRequest>)
        -> Result<Response<Self::RemoteInfoIterStream>, Status> {
        let iter = IROH_ENDPOINT.get_unchecked().remote_info_iter()
            .map(|x| {
                RemoteInfoMessage::try_from(x).map(|x| RemoteInfoResponse::from(x)).or_else(|e| {
                    Err(Status::from_error(Box::new(e)))
                })
            });
        let stream = futures::stream::iter(iter);
        Ok(Response::new(Box::pin(stream)))
    }
}
@@ -1,2 +0,0 @@
pub mod service;
@@ -1,30 +0,0 @@
use crate::{cache::entity::{CachedAddressEntity, CachedPeerEntity, CachedPeerModel}, global::DATABASE_CONNECTIONS, proto::CachedAddressMessage};
use futures::future::join_all;
use tonic::{Request, Response, Status};

use crate::proto::{cached_peer_service_server::CachedPeerServiceServer, CachedPeerListRequest, CachedPeerListResponse, CachedPeerMessage};
use sea_orm::prelude::*;

#[derive(Debug, Default)]
pub struct CachedPeerService {}

#[tonic::async_trait]
impl crate::proto::cached_peer_service_server::CachedPeerService for CachedPeerService {
    async fn list(&self, request: Request<CachedPeerListRequest>) -> Result<Response<CachedPeerListResponse>, Status> {
        println!("Got a request: {:?}", request);

        let reply = CachedPeerListResponse {
            peers: join_all(CachedPeerEntity::find().all(DATABASE_CONNECTIONS.get_cache_unchecked()).await.or_else(|e| Err(Status::from_error(Box::new(e))))?.iter().map(|x| async move {
                let addresses = CachedAddressEntity::find()
                    .all(DATABASE_CONNECTIONS.get_cache_unchecked())
                    .await
                    .or_else(|e| Err(Status::from_error(Box::new(e))))?;
                Ok::<CachedPeerMessage, Status>(CachedPeerMessage::from((x, &addresses)))
            })).await.into_iter().collect::<Result<Vec<_>, _>>()?,
        };

        Ok(Response::new(reply))
    }
}
@@ -1 +0,0 @@
pub mod cached_peer;
@@ -1,13 +1,13 @@
use crate::{config::{Config, P2pConfig, RpcConfig}, error::Error};
use crate::{config::{Config, IrohConfig, RpcConfig}, error::Error};

pub trait ServerTrait {
    async fn serve_p2p<T>(config: &T) -> Result<(), Error>
    where T: AsRef<P2pConfig>;
    where T: AsRef<IrohConfig>;
    async fn serve_rpc<T>(config: &T) -> Result<(), Error>
    where T: AsRef<RpcConfig>;
    async fn serve_all<T>(config: &T) -> Result<(), Error>
    where
        T: AsRef<P2pConfig> + AsRef<RpcConfig> {
        T: AsRef<IrohConfig> + AsRef<RpcConfig> {
        tokio::try_join!(
            Self::serve_p2p(config),
            Self::serve_rpc(config)
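A sketch, not part of the diff: serve_all only needs a value that can lend both config sections, so AsRef impls along these lines on the crate's Config would satisfy the bounds; the iroh and rpc field names are assumptions here, not taken from this diff.

// Sketch only; field names are assumed.
use crate::config::{Config, IrohConfig, RpcConfig};

impl AsRef<IrohConfig> for Config {
    fn as_ref(&self) -> &IrohConfig { &self.iroh }
}
impl AsRef<RpcConfig> for Config {
    fn as_ref(&self) -> &RpcConfig { &self.rpc }
}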
@@ -2,7 +2,7 @@ use std::{path::PathBuf, sync::LazyLock};

use tempfile::TempDir;
use url::Url;
use crate::{config::{Config, PartialConfig, PartialP2pConfig, PartialRpcConfig, RpcConfig, StorageConfig}};
use crate::{config::{Config, PartialConfig, PartialIrohConfig, PartialRpcConfig, RpcConfig, StorageConfig}};

use serde::{de::DeserializeOwned, Deserialize, Serialize};

@@ -13,7 +13,7 @@ pub static TEST_CONFIG: LazyLock<Config> = LazyLock::new(|| {

    Config {
        p2p: PartialP2pConfig::default().with_new_private_key().try_into().unwrap(),
        iroh: PartialIrohConfig::default().with_new_secret_key().try_into().unwrap(),
        storage: StorageConfig {
            data_directory: data_dir,
            cache_directory: cache_dir,
@@ -9,7 +9,6 @@ repository.workspace = true
[dependencies]
bevy.workspace = true
caretta-sync = { path = "../..", features = ["bevy"] }
libp2p.workspace = true
tokio.workspace = true
tokio-stream = { version = "0.1.17", features = ["net"] }
tonic.workspace = true
@@ -4,7 +4,7 @@ use caretta_sync::{
    config::P2pConfig,
    proto::cached_peer_service_server::CachedPeerServiceServer,
    server::ServerTrait,
    rpc::service::cached_peer::CachedPeerService
    rpc::service::iroh::CachedPeerService
};
use libp2p::{futures::StreamExt, noise, swarm::SwarmEvent, tcp, yamux};
use tokio::net::UnixListener;
@@ -10,5 +10,4 @@ repository.workspace = true
clap.workspace = true
caretta-sync = { path = "../..", features = ["cli", "bevy", "test"] }
caretta-sync-example-core.path = "../core"
libp2p.workspace = true
tokio.workspace = true
Some files were not shown because too many files have changed in this diff.