Compare commits
13 commits
069ce417df...71e0d31d8d
| SHA1 | Author | Date |
|---|---|---|
| 71e0d31d8d | | |
| 6fb909cd07 | | |
| 99fdb12712 | | |
| f2e79d4cd0 | | |
| b57734db33 | | |
| c6e678188f | | |
| d30188e7d9 | | |
| b461dc39a7 | | |
| b53c7170eb | | |
| 7fe348e803 | | |
| 731817d20c | | |
| 92fe596fd8 | | |
| 0092cfec49 | | |
84 changed files with 1284 additions and 1602 deletions
Cargo.toml (13 changes)

@@ -26,7 +26,7 @@ caretta-sync-macros = { path="macros", optional = true}
caretta-sync-core = {workspace = true, features = ["test"]}

[workspace]
-members = [ ".", "core", "macros", "cli", "mobile", "examples/*" , "bevy"]
+members = [ ".", "core", "macros", "cli", "mobile", "examples/*" , "bevy", "id"]
resolver = "3"

[workspace.package]

@@ -43,15 +43,20 @@ ciborium = "0.2.2"
clap = { version = "4.5.38", features = ["derive"] }
caretta-sync-core.path = "core"
futures = { version = "0.3.31", features = ["executor"] }
libp2p = { version = "0.55.0", features = ["macros", "mdns", "noise", "ping", "tcp", "tokio", "yamux" ] }
sea-orm = { version = "1.1.11", features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros", "with-chrono", "with-uuid"] }
sea-orm-migration = { version = "1.1.0", features = ["runtime-tokio-rustls", "sqlx-postgres"] }
rand = "0.8.5"
serde = { version = "1.0.219", features = ["derive"] }
thiserror = "2.0.12"
tokio = { version = "1.45.0", features = ["macros", "rt", "rt-multi-thread"] }
tokio-stream = "0.1.17"
tonic = "0.14.0"
url = { version = "2.5.7", features = ["serde"] }
uuid = { version = "1.17.0", features = ["v7"] }
+iroh = { version = "0.91.2", features = ["discovery-local-network", "discovery-pkarr-dht"] }
+prost = "0.14.1"
+prost-types = "0.14.1"
+tonic-prost-build = "0.14.0"
+tonic-prost = "0.14.0"

[profile.dev]
opt-level = 1
@@ -10,6 +10,5 @@ repository.workspace = true
bevy.workspace = true
caretta-sync-core.workspace = true
futures.workspace = true
sea-orm.workspace = true
tokio.workspace = true
tonic.workspace = true
@@ -15,8 +15,6 @@ ciborium.workspace = true
clap.workspace = true
dirs = "6.0.0"
caretta-sync-core = { workspace = true, features = ["cli"] }
libp2p.workspace = true
sea-orm.workspace = true
serde.workspace = true
thiserror.workspace = true
tokio.workspace = true
@@ -1,24 +0,0 @@
use clap::Args;
use caretta_sync_core::utils::runnable::Runnable;

use crate::cli::ConfigArgs;
use crate::cli::PeerArgs;

#[derive(Debug, Args)]
pub struct DeviceAddCommandArgs {
    #[command(flatten)]
    peer: PeerArgs,
    #[arg(short, long)]
    passcode: Option<String>,
    #[command(flatten)]
    config: ConfigArgs
}

impl Runnable for DeviceAddCommandArgs {
    fn run(self, app_name: &'static str) {
        todo!()
    }
}
cli/src/cli/device/auth/approve.rs (new empty file)
cli/src/cli/device/auth/list.rs (new empty file)
cli/src/cli/device/auth/mod.rs (new empty file)
cli/src/cli/device/auth/request.rs (new empty file)
@@ -6,7 +6,6 @@ mod scan;

-pub use add::DeviceAddCommandArgs;
use caretta_sync_core::utils::runnable::Runnable;
use libp2p::{Multiaddr, PeerId};
pub use list::DeviceListCommandArgs;
pub use ping::DevicePingCommandArgs;
pub use remove::DeviceRemoveCommandArgs;
@@ -1,8 +1,7 @@
use std::marker::PhantomData;

use clap::Args;
-use caretta_sync_core::{config::Config, data::migration::DataMigrator, global::{CONFIG, DATABASE_CONNECTIONS}, server::ServerTrait, utils::runnable::Runnable};
-use libp2p::{noise, ping, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Swarm};
+use caretta_sync_core::{config::Config, global::{CONFIG, LOCAL_DATABASE_CONNECTION}, server::ServerTrait, utils::runnable::Runnable};

use super::ConfigArgs;

@@ -23,7 +22,7 @@ where
    #[tokio::main]
    async fn run(self, app_name: &'static str) {
        let config = CONFIG.get_or_init::<Config>(self.config.into_config(app_name).await).await;
-        let _ = DATABASE_CONNECTIONS.get_or_init_unchecked(&config, DataMigrator).await;
+        let _ = LOCAL_DATABASE_CONNECTION.get_or_init(&config.storage.get_local_database_path());
        T::serve_all(config).await.unwrap();
    }
}
@@ -19,14 +19,12 @@ ciborium.workspace = true
clap = {workspace = true, optional = true}
dirs = "6.0.0"
futures.workspace = true
libp2p.workspace = true
libp2p-core = { version = "0.43.0", features = ["serde"] }
libp2p-identity = { version = "0.2.11", features = ["ed25519", "peerid", "rand", "serde"] }
prost = "0.14.1"
prost-types = "0.14.1"
sea-orm.workspace = true
sea-orm-migration.workspace = true
iroh.workspace = true
prost.workspace = true
prost-types.workspace = true
rusqlite = { version = "0.37.0", features = ["bundled", "chrono"] }
serde.workspace = true
sysinfo = "0.37.0"
tempfile = { version = "3.20.0", optional = true }
thiserror.workspace = true
tokio.workspace = true
@@ -37,8 +35,10 @@ tracing = "0.1.41"
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
uuid.workspace = true
url.workspace = true
sysinfo = "0.37.0"
whoami = "1.6.1"
rand.workspace = true
ed25519-dalek = { version = "2.2.0", features = ["signature"] }
tokio-stream.workspace = true

[target.'cfg(target_os="android")'.dependencies]
jni = "0.21.1"
@@ -53,4 +53,4 @@ objc2-app-kit = "0.3.1"
tempfile = "3.20.0"

[build-dependencies]
-tonic-prost-build = "0.14.0"
+tonic-prost-build.workspace = true
@@ -1,33 +1,51 @@
syntax = "proto3";
package caretta_sync;
import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";

-enum PeerListOrderBy {
-  CREATED_AT = 0;
-  UPDATED_AT = 1;
-  PEER_ID = 2;
-}
-
-service CachedPeerService {
-  rpc List(CachedPeerListRequest) returns (CachedPeerListResponse);
-}
-
-message CachedPeerListRequest {}
-
-message CachedPeerMessage {
-  uint32 number = 1;
-  string peer_id = 2;
-  google.protobuf.Timestamp created_at = 3;
-  repeated CachedAddressMessage addresses = 4;
-}
-
-message CachedAddressMessage {
-  uint32 number = 1;
-  google.protobuf.Timestamp created_at = 2;
-  google.protobuf.Timestamp updated_at = 3;
-  string multiaddress = 4;
-}
-
-message CachedPeerListResponse {
-  repeated CachedPeerMessage peers = 1;
-}
+service CarettaSync {
+  rpc RemoteInfo(RemoteInfoRequest) returns (RemoteInfoResponse);
+  rpc RemoteInfoIter(RemoteInfoIterRequest) returns (stream RemoteInfoResponse);
+}
+
+message NodeIdMessage {
+  bytes node_id = 1;
+}
+
+message RemoteInfoRequest {
+  NodeIdMessage node_id = 1;
+}
+
+message RemoteInfoIterRequest {}
+
+message RemoteInfoResponse {
+  RemoteInfoMessage remote_info = 1;
+}
+
+message RemoteInfoMessage {
+  NodeIdMessage node_id = 1;
+  string relay_url = 2;
+  repeated DirectAddrInfoMessage addrs = 3;
+  string conn_type = 4;
+  google.protobuf.Duration latency = 5;
+  google.protobuf.Duration last_used = 6;
+}
+
+message DirectAddrInfoMessage {
+  string addr = 1;
+  google.protobuf.Duration latency = 2;
+  LastControlMessage last_control = 3;
+  google.protobuf.Duration last_payload = 4;
+  google.protobuf.Duration last_alive = 5;
+  repeated SourceMessage sources = 6;
+}
+
+message LastControlMessage {
+  google.protobuf.Duration duration = 1;
+  string control_msg = 2;
+}
+
+message SourceMessage {
+  string source = 1;
+  google.protobuf.Duration duration = 2;
+}
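For orientation, a caller of the new CarettaSync service could look roughly like the sketch below. The include macro, generated module and client names, and the server address follow tonic-prost-build's default codegen conventions and are assumptions, not part of this compare.

```rust
// Hypothetical consumer of the RemoteInfo RPC defined above. Generated names
// are assumptions based on default tonic codegen; adjust to the build setup.
pub mod caretta_sync {
    tonic::include_proto!("caretta_sync");
}

use caretta_sync::caretta_sync_client::CarettaSyncClient;
use caretta_sync::{NodeIdMessage, RemoteInfoRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder address; the diff does not define where the server listens.
    let mut client = CarettaSyncClient::connect("http://127.0.0.1:50051").await?;
    let reply = client
        .remote_info(RemoteInfoRequest {
            node_id: Some(NodeIdMessage { node_id: vec![0u8; 32] }), // placeholder id
        })
        .await?;
    println!("{:?}", reply.into_inner().remote_info);
    Ok(())
}
```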
core/src/cache/entity/cached_address.rs (vendored, 59 lines deleted)

@@ -1,59 +0,0 @@
use std::str::FromStr;

use chrono::{Days, Local};
use libp2p::{multiaddr, Multiaddr, PeerId};
use prost_types::Timestamp;
use sea_orm::{entity::{
    prelude::*, *
}, sea_query};
use serde::{Deserialize, Serialize};

use crate::{cache, data::value::{MultiaddrValue, PeerIdValue}, utils::utc_to_timestamp};

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Deserialize, Serialize)]
#[sea_orm(table_name = "cached_address")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: u32,
    #[sea_orm(indexed)]
    pub created_at: DateTimeUtc,
    #[sea_orm(indexed)]
    pub updated_at: DateTimeUtc,
    #[sea_orm(indexed)]
    pub cached_peer_id: u32,
    #[sea_orm(indexed)]
    pub multiaddress: MultiaddrValue,
}

#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
pub enum Relation {
    #[sea_orm(
        belongs_to = "super::CachedPeerEntity",
        from = "Column::CachedPeerId",
        to = "super::CachedPeerColumn::Id"
    )]
    CachedPeer,
}

impl Related<super::CachedPeerEntity> for Entity {
    fn to() -> RelationDef {
        Relation::CachedPeer.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}

impl ActiveModel {
    pub fn new(cached_peer_id: u32, multiaddr: Multiaddr) -> Self {
        let timestamp: DateTimeUtc = Local::now().to_utc();
        Self {
            cached_peer_id: Set(cached_peer_id),
            multiaddress: Set(MultiaddrValue::from(multiaddr)),
            created_at: Set(timestamp),
            updated_at: Set(timestamp),
            ..Default::default()
        }
    }
}
core/src/cache/entity/cached_peer.rs (vendored, 57 lines deleted)

@@ -1,57 +0,0 @@
use std::str::FromStr;

use chrono::{Days, Local};
use libp2p::{multiaddr, Multiaddr, PeerId};
use sea_orm::{entity::{
    prelude::*, *
}, sea_query};
use serde::{Deserialize, Serialize};

use crate::data::value::{MultiaddrValue, PeerIdValue};

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Deserialize, Serialize)]
#[sea_orm(table_name = "cached_peer")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: u32,
    #[sea_orm(indexed)]
    pub created_at: DateTimeUtc,
    #[sea_orm(indexed)]
    pub updated_at: DateTimeUtc,
    #[sea_orm(indexed)]
    pub peer_id: PeerIdValue,
}

#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
pub enum Relation {
    #[sea_orm(has_many = "super::CachedAddressEntity")]
    CachedAddress,
}

impl Related<super::CachedAddressEntity> for Entity {
    fn to() -> RelationDef {
        Relation::CachedAddress.def()
    }
}

impl ActiveModelBehavior for ActiveModel {}

impl ActiveModel {
    pub fn new(peer_id: PeerId) -> Self {
        let timestamp: DateTimeUtc = Local::now().to_utc();
        Self {
            peer_id: Set(PeerIdValue::from(peer_id)),
            created_at: Set(timestamp),
            updated_at: Set(timestamp),
            ..Default::default()
        }
    }
}

impl Entity {
    pub fn find_by_peer_id(peer_id: PeerId) -> Select<Entity> {
        Self::find().filter(Column::PeerId.eq(PeerIdValue::from(peer_id)))
    }
}
core/src/cache/entity/mod.rs (vendored, 48 lines deleted)

@@ -1,48 +0,0 @@
mod cached_peer;
mod cached_address;

pub use cached_peer::{
    ActiveModel as CachedPeerActiveModel,
    Column as CachedPeerColumn,
    Model as CachedPeerModel,
    Entity as CachedPeerEntity,
};

pub use cached_address::{
    ActiveModel as CachedAddressActiveModel,
    Column as CachedAddressColumn,
    Model as CachedAddressModel,
    Entity as CachedAddressEntity,
};

#[cfg(test)]
mod tests {
    use std::net::Ipv4Addr;

    use crate::{cache::entity::cached_peer, data::migration::DataMigrator, global::{DATABASE_CONNECTIONS}, tests::TEST_CONFIG};

    use super::*;

    use libp2p::{identity::{self, Keypair}, multiaddr, swarm::handler::multi, Multiaddr, PeerId};
    use sea_orm::ActiveModelTrait;

    #[tokio::test]
    async fn insert() {
        let db = DATABASE_CONNECTIONS.get_or_init_unchecked(&*TEST_CONFIG, DataMigrator).await.cache;
        let peer_id = Keypair::generate_ed25519().public().to_peer_id();
        let multiaddr = Multiaddr::empty()
            .with(Ipv4Addr::new(127, 0, 0, 1).into())
            .with(multiaddr::Protocol::Tcp(0));
        let inserted_cached_peer: CachedPeerModel = CachedPeerActiveModel::new(peer_id.clone())
            .insert(db).await.unwrap();
        let inserted_cached_address: CachedAddressModel = CachedAddressActiveModel::new(inserted_cached_peer.id, multiaddr.clone())
            .insert(db).await.unwrap();
        assert_eq!(PeerId::from(inserted_cached_peer.peer_id), peer_id);
        assert_eq!(Multiaddr::from(inserted_cached_address.multiaddress), multiaddr);
    }
}
@@ -1,148 +0,0 @@
use sea_orm_migration::{prelude::*, schema::*};

use crate::migration::TableMigration;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        CachedPeer::up(manager).await?;
        CachedAddress::up(manager).await?;
        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        CachedAddress::down(manager).await?;
        CachedPeer::down(manager).await?;
        Ok(())
    }
}

#[derive(DeriveIden, DeriveMigrationName)]
enum CachedPeer {
    Table,
    Id,
    PeerId,
    CreatedAt,
    UpdatedAt,
}

static IDX_CACHED_PEER_PEER_ID: &str = "idx_cached_peer_peer_id";
static IDX_CACHED_PEER_CREATED_AT: &str = "idx_cached_peer_created_at";
static IDX_CACHED_PEER_UPDATED_AT: &str = "idx_cached_peer_updated_at";

#[async_trait::async_trait]
impl TableMigration for CachedPeer {
    async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.create_table(
            Table::create()
                .table(Self::Table)
                .if_not_exists()
                .col(pk_auto(Self::Id))
                .col(string_len(Self::PeerId, 255))
                .col(timestamp(Self::CreatedAt))
                .col(timestamp(Self::UpdatedAt))
                .to_owned()
        ).await?;
        manager.create_index(
            Index::create()
                .name(IDX_CACHED_PEER_PEER_ID)
                .table(Self::Table)
                .col(Self::PeerId)
                .to_owned()
        ).await?;
        manager.create_index(
            Index::create()
                .name(IDX_CACHED_PEER_CREATED_AT)
                .table(Self::Table)
                .col(Self::CreatedAt)
                .to_owned()
        ).await?;
        manager.create_index(
            Index::create()
                .name(IDX_CACHED_PEER_UPDATED_AT)
                .table(Self::Table)
                .col(Self::UpdatedAt)
                .to_owned()
        ).await?;
        Ok(())
    }
    async fn down<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.drop_table(Table::drop().table(Self::Table).to_owned()).await
    }
}

#[derive(DeriveIden, DeriveMigrationName)]
enum CachedAddress {
    Table,
    Id,
    CachedPeerId,
    CreatedAt,
    UpdatedAt,
    Multiaddress,
}

static IDX_CACHED_ADDRESS_MULTIADDRESS: &str = "idx_cached_address_multiaddress";
static IDX_CACHED_ADDRESS_CACHED_PEER_ID: &str = "idx_cached_address_cached_peer_id";
static IDX_CACHED_ADDRESS_CREATED_AT: &str = "idx_cached_address_created_at";
static IDX_CACHED_ADDRESS_UPDATED_AT: &str = "idx_cached_address_updated_at";
static FK_CACHED_ADDRESS_CACHED_PEER: &str = "fk_cached_address_cached_peer";

#[async_trait::async_trait]
impl TableMigration for CachedAddress {
    async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.create_table(
            Table::create()
                .table(Self::Table)
                .if_not_exists()
                .col(pk_auto(Self::Id))
                .col(integer(Self::CachedPeerId))
                .foreign_key(ForeignKey::create()
                    .name(FK_CACHED_ADDRESS_CACHED_PEER)
                    .from(Self::Table, Self::CachedPeerId)
                    .to(CachedPeer::Table, CachedPeer::Id)
                    .on_delete(ForeignKeyAction::Cascade)
                    .on_update(ForeignKeyAction::Cascade)
                )
                .col(timestamp(Self::CreatedAt))
                .col(timestamp(Self::UpdatedAt))
                .col(text_uniq(Self::Multiaddress))
                .to_owned()
        ).await?;
        manager.create_index(
            Index::create()
                .name(IDX_CACHED_ADDRESS_CACHED_PEER_ID)
                .table(Self::Table)
                .col(Self::CachedPeerId)
                .to_owned()
        ).await?;
        manager.create_index(
            Index::create()
                .name(IDX_CACHED_ADDRESS_MULTIADDRESS)
                .table(Self::Table)
                .col(Self::Multiaddress)
                .to_owned()
        ).await?;
        manager.create_index(
            Index::create()
                .name(IDX_CACHED_ADDRESS_CREATED_AT)
                .table(Self::Table)
                .col(Self::CreatedAt)
                .to_owned()
        ).await?;
        manager.create_index(
            Index::create()
                .name(IDX_CACHED_ADDRESS_UPDATED_AT)
                .table(Self::Table)
                .col(Self::UpdatedAt)
                .to_owned()
        ).await?;

        Ok(())
    }
    async fn down<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.drop_table(Table::drop().table(Self::Table).to_owned()).await
    }
}
core/src/cache/migration/mod.rs (vendored, 12 lines deleted)

@@ -1,12 +0,0 @@
use sea_orm_migration::prelude::*;

pub mod m20220101_000001_create_cache_tables;

pub struct CacheMigrator;

#[async_trait::async_trait]
impl MigratorTrait for CacheMigrator {
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![Box::new(m20220101_000001_create_cache_tables::Migration)]
    }
}
core/src/cache/mod.rs (vendored, 2 lines deleted)

@@ -1,2 +0,0 @@
pub mod entity;
pub mod migration;
core/src/config/iroh.rs (new file, 134 lines)

@@ -0,0 +1,134 @@
use std::{net::{IpAddr, Ipv4Addr}, ops, path::{Path, PathBuf}};

use base64::{prelude::BASE64_STANDARD, Engine};
#[cfg(feature="cli")]
use clap::Args;
use futures::StreamExt;
use iroh::{Endpoint, SecretKey};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::{AsyncReadExt, AsyncWriteExt}};
use tracing_subscriber::EnvFilter;

use crate::{
    config::PartialConfig,
    error::Error, utils::{emptiable::Emptiable, mergeable::Mergeable}
};

#[derive(Clone, Debug)]
pub struct IrohConfig {
    pub enable: bool,
    pub secret_key: SecretKey,
    pub use_n0_discovery_service: bool,
}

impl IrohConfig {
    async fn into_endpoint(config: Self) -> Result<Option<Endpoint>, crate::error::Error> {
        if config.enable {
            let mut endpoint = Endpoint::builder()
                .secret_key(config.secret_key)
                .discovery_dht()
                .discovery_local_network();
            if config.use_n0_discovery_service {
                endpoint = endpoint.discovery_n0();
            }
            Ok(Some(endpoint.bind().await?))
        } else {
            Ok(None)
        }
    }
}

impl TryFrom<PartialIrohConfig> for IrohConfig {
    type Error = crate::error::Error;
    fn try_from(raw: PartialIrohConfig) -> Result<IrohConfig, Self::Error> {
        Ok(IrohConfig {
            enable: raw.enable.ok_or(Error::MissingConfig("iroh.enable"))?,
            secret_key: raw.secret_key.ok_or(Error::MissingConfig("iroh.secret_key"))?,
            use_n0_discovery_service: raw.use_n0_discovery_service.ok_or(Error::MissingConfig("iroh.use_n0_discovery_service"))?
        })
    }
}

#[cfg_attr(feature="cli", derive(Args))]
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PartialIrohConfig {
    #[cfg_attr(feature="cli", arg(long="p2p_enable"))]
    pub enable: Option<bool>,
    #[cfg_attr(feature="cli", arg(long))]
    pub secret_key: Option<SecretKey>,
    #[cfg_attr(feature="cli", arg(long))]
    pub use_n0_discovery_service: Option<bool>,
}

impl PartialIrohConfig {
    pub fn with_new_secret_key(mut self) -> Self {
        let mut rng = rand::rngs::OsRng;
        self.secret_key = Some(SecretKey::generate(&mut rng));
        self
    }
}

impl From<IrohConfig> for PartialIrohConfig {
    fn from(config: IrohConfig) -> Self {
        Self {
            enable: Some(config.enable),
            secret_key: Some(config.secret_key),
            use_n0_discovery_service: Some(config.use_n0_discovery_service)
        }
    }
}

impl Default for PartialIrohConfig {
    fn default() -> Self {
        Self {
            enable: Some(true),
            secret_key: None,
            use_n0_discovery_service: Some(true)
        }
    }
}

impl Emptiable for PartialIrohConfig {
    fn empty() -> Self {
        Self {
            enable: None,
            secret_key: None,
            use_n0_discovery_service: None
        }
    }

    fn is_empty(&self) -> bool {
        self.enable.is_none() && self.secret_key.is_none() && self.use_n0_discovery_service.is_none()
    }
}

impl Mergeable for PartialIrohConfig {
    fn merge(&mut self, mut other: Self) {
        if let Some(x) = other.enable.take() {
            let _ = self.enable.insert(x);
        };
        if let Some(x) = other.secret_key.take() {
            let _ = self.secret_key.insert(x);
        };
        if let Some(x) = other.use_n0_discovery_service.take() {
            let _ = self.use_n0_discovery_service.insert(x);
        };
    }
}

impl Mergeable for Option<PartialIrohConfig> {
    fn merge(&mut self, mut other: Self) {
        match other.take() {
            Some(x) => {
                if let Some(y) = self.as_mut() {
                    y.merge(x);
                } else {
                    let _ = self.insert(x);
                }
            },
            None => {}
        };
    }
}
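As a usage sketch of the types above (the layering order and elided error handling are assumptions): a partial config can be layered and resolved, with `merge` giving precedence to the argument's set fields.

```rust
// Sketch: start from the defaults, overlay a freshly generated secret key,
// then resolve into a complete IrohConfig.
let mut partial = PartialIrohConfig::default();
partial.merge(PartialIrohConfig::empty().with_new_secret_key());
let iroh_config = IrohConfig::try_from(partial)?; // fails if any field is still None
```

Inside the crate, `IrohConfig::into_endpoint(iroh_config).await` would then yield `Some(Endpoint)` only when `enable` is true.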
@@ -1,16 +1,16 @@
pub mod error;
mod storage;
-mod p2p;
+mod iroh;
mod rpc;

-use std::{path::Path, default::Default};
+use std::{default::Default, fs::File, io::{Read, Write}, path::Path};
use crate::{utils::{emptiable::Emptiable, mergeable::Mergeable}};
pub use error::ConfigError;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
-use tokio::{fs::File, io::{AsyncReadExt, AsyncWriteExt}};
+use tokio::{io::{AsyncReadExt, AsyncWriteExt}};
pub use storage::{StorageConfig, PartialStorageConfig};
-pub use p2p::{P2pConfig, PartialP2pConfig};
+pub use iroh::{IrohConfig, PartialIrohConfig};
pub use rpc::*;

#[cfg(feature="cli")]

@@ -18,7 +18,7 @@ use clap::Args;
#[derive(Clone, Debug)]
pub struct Config {
-    pub p2p: P2pConfig,
+    pub iroh: IrohConfig,
    pub storage: StorageConfig,
    pub rpc: RpcConfig,
}

@@ -29,9 +29,9 @@ impl AsRef<StorageConfig> for Config {
    }
}

-impl AsRef<P2pConfig> for Config {
-    fn as_ref(&self) -> &P2pConfig {
-        &self.p2p
+impl AsRef<IrohConfig> for Config {
+    fn as_ref(&self) -> &IrohConfig {
+        &self.iroh
    }
}

@@ -46,17 +46,17 @@ impl TryFrom<PartialConfig> for Config {
    fn try_from(value: PartialConfig) -> Result<Self, Self::Error> {
        Ok(Self{
            rpc: value.rpc.ok_or(crate::error::Error::MissingConfig("rpc"))?.try_into()?,
-            p2p: value.p2p.ok_or(crate::error::Error::MissingConfig("p2p"))?.try_into()?,
+            iroh: value.iroh.ok_or(crate::error::Error::MissingConfig("p2p"))?.try_into()?,
            storage: value.storage.ok_or(crate::error::Error::MissingConfig("storage"))?.try_into()?
        })
    }
}

#[cfg_attr(feature="cli", derive(Args))]
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PartialConfig {
    #[cfg_attr(feature="cli", command(flatten))]
-    pub p2p: Option<PartialP2pConfig>,
+    pub iroh: Option<PartialIrohConfig>,
    #[cfg_attr(feature="cli", command(flatten))]
    pub storage: Option<PartialStorageConfig>,
    #[cfg_attr(feature="cli", command(flatten))]

@@ -66,7 +66,7 @@ pub struct PartialConfig {
impl PartialConfig {
    pub fn new() -> Self {
        Self {
-            p2p : Some(PartialP2pConfig::empty().with_new_private_key()),
+            iroh : Some(PartialIrohConfig::empty().with_new_secret_key()),
            storage: Some(PartialStorageConfig::empty()),
            rpc: Some(PartialRpcConfig::empty()),
        }

@@ -77,16 +77,16 @@ impl PartialConfig {
    pub fn into_toml(&self) -> Result<String, toml::ser::Error> {
        toml::to_string(self)
    }
-    pub async fn read_or_create<T>(path: T) -> Result<Self, ConfigError>
+    pub fn read_or_create<T>(path: T) -> Result<Self, ConfigError>
    where
        T: AsRef<Path>
    {
        if !path.as_ref().exists() {
-            Self::new().write_to(&path).await?;
+            Self::new().write_to(&path)?;
        }
-        Self::read_from(&path).await
+        Self::read_from(&path)
    }
-    pub async fn read_from<T>(path:T) -> Result<Self, ConfigError>
+    pub fn read_from<T>(path:T) -> Result<Self, ConfigError>
    where
        T: AsRef<Path>
    {

@@ -94,15 +94,15 @@ impl PartialConfig {
        if let Some(x) = path.as_ref().parent() {
            std::fs::create_dir_all(x)?;
        };
-        let _ = File::create(&path).await?;
+        let _ = File::create(&path)?;
        }
-        let mut file = File::open(path.as_ref()).await?;
+        let mut file = File::open(path.as_ref())?;
        let mut content = String::new();
-        file.read_to_string(&mut content).await?;
+        file.read_to_string(&mut content)?;
        let config: Self = toml::from_str(&content)?;
        Ok(config)
    }
-    pub async fn write_to<T>(&self, path:T) -> Result<(), ConfigError>
+    pub fn write_to<T>(&self, path:T) -> Result<(), ConfigError>
    where
        T: AsRef<Path>
    {

@@ -110,15 +110,15 @@ impl PartialConfig {
        if let Some(x) = path.as_ref().parent() {
            std::fs::create_dir_all(x)?;
        };
-        let _ = File::create(&path).await?;
+        let _ = File::create(&path)?;
        }
-        let mut file = File::create(&path).await?;
-        file.write_all(toml::to_string(self)?.as_bytes()).await?;
+        let mut file = File::create(&path)?;
+        file.write_all(toml::to_string(self)?.as_bytes())?;
        Ok(())
    }
    pub fn default(app_name: &'static str) -> Self {
        Self {
-            p2p: Some(PartialP2pConfig::default()),
+            iroh: Some(PartialIrohConfig::default()),
            rpc: Some(PartialRpcConfig::default(app_name)),
            storage: Some(PartialStorageConfig::default(app_name)),
        }

@@ -128,7 +128,7 @@ impl PartialConfig {
impl From<Config> for PartialConfig {
    fn from(value: Config) -> Self {
        Self {
-            p2p: Some(value.p2p.into()),
+            iroh: Some(value.iroh.into()),
            storage: Some(value.storage.into()),
            rpc: Some(value.rpc.into())
        }

@@ -138,20 +138,20 @@ impl From<Config> for PartialConfig {
impl Emptiable for PartialConfig {
    fn empty() -> Self {
        Self {
-            p2p: None,
+            iroh: None,
            storage: None,
            rpc: None,
        }
    }

    fn is_empty(&self) -> bool {
-        self.p2p.is_empty() && self.rpc.is_empty() && self.storage.is_empty()
+        self.iroh.is_empty() && self.rpc.is_empty() && self.storage.is_empty()
    }
}

impl Mergeable for PartialConfig {
    fn merge(&mut self, other: Self) {
-        self.p2p.merge(other.p2p);
+        self.iroh.merge(other.iroh);
        self.rpc.merge(other.rpc);
        self.storage.merge(other.storage);
    }
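With the file I/O now synchronous, loading a full configuration could look like this sketch (path, app name, and layering order are assumptions):

```rust
// Sketch: defaults first, then the on-disk TOML on top, since later merges win.
let mut partial = PartialConfig::default("caretta");
partial.merge(PartialConfig::read_or_create("caretta/config.toml")?);
let config = Config::try_from(partial)?; // errors on any missing section
```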
@@ -1,181 +0,0 @@
use std::{net::{IpAddr, Ipv4Addr}, ops, path::{Path, PathBuf}};

use base64::{prelude::BASE64_STANDARD, Engine};
#[cfg(feature="cli")]
use clap::Args;
use futures::StreamExt;
use libp2p::{identity::{self, DecodingError, Keypair}, noise, ping, swarm::SwarmEvent, tcp, yamux, Swarm};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::{AsyncReadExt, AsyncWriteExt}};
use tracing_subscriber::EnvFilter;

use crate::{
    config::PartialConfig,
    error::Error, p2p, utils::{emptiable::Emptiable, mergeable::Mergeable}
};

static DEFAULT_P2P_LISTEN_IPS: &[IpAddr] = &[IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))];
static DEFAULT_P2P_PORT: u16 = 0;

fn keypair_to_base64(keypair: &Keypair) -> String {
    let vec = match keypair.to_protobuf_encoding() {
        Ok(x) => x,
        Err(_) => unreachable!(),
    };
    BASE64_STANDARD.encode(vec)
}

fn base64_to_keypair(base64: &str) -> Result<Keypair, Error> {
    let vec = BASE64_STANDARD.decode(base64)?;
    Ok(Keypair::from_protobuf_encoding(&vec)?)
}

#[derive(Clone, Debug)]
pub struct P2pConfig {
    pub private_key: Keypair,
    pub listen_ips: Vec<IpAddr>,
    pub port: u16,
}

impl P2pConfig {
    async fn try_into_swarm(self) -> Result<Swarm<p2p::Behaviour>, Error> {
        let mut swarm = libp2p::SwarmBuilder::with_existing_identity(self.private_key)
            .with_tokio()
            .with_tcp(
                tcp::Config::default(),
                noise::Config::new,
                yamux::Config::default,
            )?
            .with_behaviour(|keypair| p2p::Behaviour::try_from(keypair).unwrap())?
            .build();
        swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
        Ok(swarm)
    }
    pub async fn launch_swarm(self) -> Result<(), Error> {
        let mut swarm = self.try_into_swarm().await?;
        loop {
            let swarm_event = swarm.select_next_some().await;
            tokio::spawn(async move {
                match swarm_event {
                    SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
                    SwarmEvent::Behaviour(event) => {
                        println!("{event:?}");
                        event.run().await;
                    },
                    _ => {}
                }
            });
        }
    }
}

impl TryFrom<PartialP2pConfig> for P2pConfig {
    type Error = crate::error::Error;
    fn try_from(raw: PartialP2pConfig) -> Result<P2pConfig, Self::Error> {
        Ok(P2pConfig {
            private_key: base64_to_keypair(&raw.private_key.ok_or(Error::MissingConfig("secret"))?)?,
            listen_ips: raw.listen_ips.ok_or(Error::MissingConfig("listen_ips"))?,
            port: raw.port.ok_or(Error::MissingConfig("port"))?
        })
    }
}

#[cfg_attr(feature="cli", derive(Args))]
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub struct PartialP2pConfig {
    #[cfg_attr(feature="cli", arg(long))]
    pub private_key: Option<String>,
    #[cfg_attr(feature="cli", arg(long))]
    pub listen_ips: Option<Vec<IpAddr>>,
    #[cfg_attr(feature="cli", arg(long))]
    pub port: Option<u16>,
}

impl PartialP2pConfig {
    pub fn with_new_private_key(mut self) -> Self {
        self.private_key = Some(keypair_to_base64(&Keypair::generate_ed25519()));
        self
    }
    pub fn init_private_key(&mut self) {
        let _ = self.private_key.insert(keypair_to_base64(&Keypair::generate_ed25519()));
    }
}

impl From<P2pConfig> for PartialP2pConfig {
    fn from(config: P2pConfig) -> Self {
        Self {
            private_key: Some(keypair_to_base64(&config.private_key)),
            listen_ips: Some(config.listen_ips),
            port: Some(config.port)
        }
    }
}

impl Default for PartialP2pConfig {
    fn default() -> Self {
        Self {
            private_key: None,
            listen_ips: Some(Vec::from(DEFAULT_P2P_LISTEN_IPS)),
            port: Some(DEFAULT_P2P_PORT),
        }
    }
}

impl Emptiable for PartialP2pConfig {
    fn empty() -> Self {
        Self {
            private_key: None,
            listen_ips: None,
            port: None
        }
    }

    fn is_empty(&self) -> bool {
        self.private_key.is_none() && self.listen_ips.is_none() && self.port.is_none()
    }
}

impl Mergeable for PartialP2pConfig {
    fn merge(&mut self, mut other: Self) {
        if let Some(x) = other.private_key.take() {
            let _ = self.private_key.insert(x);
        };
        if let Some(x) = other.listen_ips.take() {
            let _ = self.listen_ips.insert(x);
        };
        if let Some(x) = other.port.take() {
            let _ = self.port.insert(x);
        };
    }
}

impl Mergeable for Option<PartialP2pConfig> {
    fn merge(&mut self, mut other: Self) {
        match other.take() {
            Some(x) => {
                if let Some(y) = self.as_mut() {
                    y.merge(x);
                } else {
                    let _ = self.insert(x);
                }
            },
            None => {}
        };
    }
}

#[cfg(test)]
mod tests {
    use libp2p::identity;
    use super::*;

    #[tokio::test]
    async fn parse_keypair() {
        let keypair = identity::Keypair::generate_ed25519();
        let keypair2 = base64_to_keypair(&keypair_to_base64(&keypair)).unwrap();

        assert_eq!(keypair.public(), keypair2.public());
    }
}
@@ -3,7 +3,6 @@ use std::{net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, path::PathBuf, str::
use clap::Args;
use url::Url;
use crate::{config::PartialConfig, utils::{emptiable::Emptiable, mergeable::Mergeable}};
-use libp2p::mdns::Config;
use serde::{Deserialize, Serialize};

use crate::config::error::ConfigError;
@@ -3,10 +3,10 @@ use std::path::PathBuf;
#[cfg(feature="cli")]
use clap::Args;

+use rusqlite::Connection;
#[cfg(any(test, feature="test"))]
use tempfile::tempdir;
use crate::{config::{ConfigError, PartialConfig}, utils::{emptiable::Emptiable, get_binary_name, mergeable::Mergeable}};
-use libp2p::mdns::Config;
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug)]

@@ -15,6 +15,21 @@ pub struct StorageConfig {
    pub cache_directory: PathBuf,
}

+impl StorageConfig {
+    pub fn get_global_data_directory(&self) -> PathBuf {
+        self.data_directory.join("global")
+    }
+    pub fn get_global_root_document_path(&self) -> PathBuf {
+        self.data_directory.join("global.bin")
+    }
+    pub fn get_local_data_directory(&self) -> PathBuf {
+        self.data_directory.join("local")
+    }
+    pub fn get_local_database_path(&self) -> PathBuf {
+        self.data_directory.join("local.sqlite")
+    }
+}

impl TryFrom<PartialStorageConfig> for StorageConfig {
    type Error = ConfigError;

@@ -25,6 +40,7 @@ impl TryFrom<PartialStorageConfig> for StorageConfig {
        })
    }
}

#[cfg_attr(feature="cli", derive(Args))]
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct PartialStorageConfig {

@@ -83,6 +99,7 @@ impl PartialStorageConfig {
    }
}

impl From<StorageConfig> for PartialStorageConfig {
core/src/data/distributed/mod.rs (new empty file)
@@ -1,35 +0,0 @@
mod trusted_node;
mod record_deletion;

pub use trusted_node::{
    ActiveModel as TrustedNodeActiveModel,
    Column as TrustedNodeColumn,
    Entity as TrustedNodeEntity,
    Model as TrustedNodeModel,
};

pub use record_deletion::{
    ActiveModel as RecordDeletionActiveModel,
    Column as RecordDeletionColumn,
    Entity as RecordDeletionEntity,
    Model as RecordDeletionModel,
};

#[cfg(test)]
mod tests {
    use crate::{data::{migration::DataMigrator, value::PeerIdValue}, global::{generate_uuid, DATABASE_CONNECTIONS}, tests::TEST_CONFIG};

    use super::*;

    use libp2p::{identity, PeerId};
    use sea_orm::ActiveModelTrait;

    #[tokio::test]
    async fn check_insert() {
        let db = DATABASE_CONNECTIONS.get_or_init_unchecked(&*TEST_CONFIG, DataMigrator).await.cache;

        let node = TrustedNodeActiveModel::new(PeerId::random(), "test note".to_owned()).insert(db).await.unwrap();
        let _ = RecordDeletionActiveModel::new(node.id, "test_table".to_string(), generate_uuid()).insert(db).await.unwrap();
    }
}
@@ -1,42 +0,0 @@
use chrono::Local;
use sea_orm::{entity::{
    prelude::*, *
}, sea_query::table};
use serde::{Deserialize, Serialize};
use crate::data::syncable::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[cfg_attr(feature="macros", derive(SyncableModel))]
#[sea_orm(table_name = "record_deletion")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    #[cfg_attr(feature="macros", syncable(id))]
    pub id: Uuid,
    #[sea_orm(indexed)]
    #[cfg_attr(feature="macros", syncable(timestamp))]
    pub created_at: DateTimeUtc,
    #[cfg_attr(feature="macros", syncable(author_id))]
    pub created_by: Uuid,
    pub table_name: String,
    pub record_id: Uuid,
}

#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
pub enum Relation {}

impl ActiveModelBehavior for ActiveModel {}

impl ActiveModel {
    pub fn new(node_id: Uuid, table_name: String, record_id: Uuid) -> Self {
        let timestamp: DateTimeUtc = Local::now().to_utc();
        Self {
            id: Set(crate::global::generate_uuid()),
            created_at: Set(timestamp),
            created_by: Set(node_id),
            table_name: Set(table_name),
            record_id: Set(record_id),
        }
    }
}
@@ -1,49 +0,0 @@
use chrono::Local;
use libp2p::PeerId;
use sea_orm::entity::{
    *,
    prelude::*
};
use serde::{Deserialize, Serialize};

use crate::data::value::PeerIdValue;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "trusted_node")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    pub id: Uuid,
    #[sea_orm(indexed)]
    pub created_at: DateTimeUtc,
    #[sea_orm(indexed)]
    pub updated_at: DateTimeUtc,
    #[sea_orm(indexed)]
    pub synced_at: Option<DateTimeUtc>,
    #[sea_orm(indexed)]
    pub peer_id: PeerIdValue,
    #[sea_orm(column_type = "Text")]
    pub note: String,
    pub is_prefered: bool,
}

#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
pub enum Relation {}

impl ActiveModelBehavior for ActiveModel {}

impl ActiveModel {
    pub fn new(peer_id: PeerId, note: String) -> Self {
        let timestamp: DateTimeUtc = Local::now().to_utc();
        Self {
            id: Set(crate::global::generate_uuid()),
            peer_id: Set(PeerIdValue::from(peer_id)),
            created_at: Set(timestamp),
            updated_at: Set(timestamp),
            synced_at: Set(None),
            is_prefered: Set(false),
            note: Set(note),
        }
    }
}
core/src/data/local/authorization/mod.rs (new file, 91 lines)

@@ -0,0 +1,91 @@
//! Structs about authorization.

mod request;
mod response;

use std::os::unix::raw::time_t;

use chrono::{DateTime, Local, NaiveDateTime};
use iroh::NodeId;
pub use request::*;
pub use response::*;
use rusqlite::{params, types::FromSqlError, Connection};
use uuid::Uuid;

use crate::data::local::RusqliteRecord;

/// Ongoing authorization.
pub struct Authorization {
    request_id: Uuid,
    node_id: NodeId,
    node_info: Option<String>,
    passcode: Option<String>,
    created_at: DateTime<Local>,
    updated_at: DateTime<Local>,
}

static TABLE_NAME: &str = "authorization";
static DEFAULT_COLUMNS: [&str; 5] = [
    "request_id",
    "node_id",
    "created_at",
    "updated_at"
];

impl Authorization {
    pub fn new_sent(node_id: NodeId, passcode: String) -> Self {
        let timestamp = Local::now();
        Self {
            node_id: node_id,
            passcode: passcode,
            created_at: timestamp.clone(),
            updated_at: timestamp
        }
    }
    pub fn new_received(node_id:)
    pub fn get_by_node_id(node_id: NodeId, connection: &Connection) -> Result<Self, rusqlite::Error> {
        connection.query_row(
            "SELECT node_id, passcode, created_at, updated_at FROM authorization WHERE node_id=(?1)",
            params![node_id.as_bytes()],
            Self::from_row
        )
    }
}

impl RusqliteRecord for Authorization {
    fn from_row(row: &rusqlite::Row<'_>) -> Result<Self, rusqlite::Error> {
        let created_at: NaiveDateTime = row.get(2)?;
        let updated_at: NaiveDateTime = row.get(3)?;
        let node_id: Vec<u8> = row.get(0)?;
        Ok(Self {
            node_id: NodeId::from_bytes(node_id[..32].try_into().or_else(|e| {
                Err(rusqlite::types::FromSqlError::InvalidBlobSize {
                    expected_size: 32,
                    blob_size: node_id.len()
                })
            })?).or(Err(FromSqlError::InvalidType))?,
            passcode: row.get(1)?,
            created_at: DateTime::from(created_at.and_utc()),
            updated_at: DateTime::from(updated_at.and_utc()),
        })
    }
    fn insert(&self, connection: &rusqlite::Connection) -> Result<(), rusqlite::Error> {
        connection.execute(
            "INSERT INTO authorization (node_id, passcode, created_at, updated_at) VALUES (?1, ?2, ?3, ?4)",
            (&self.node_id.as_bytes(), &self.passcode, &self.created_at.naive_utc(), &self.updated_at.naive_utc()),
        )?;
        Ok(())
    }
    fn get_all(connection: &rusqlite::Connection) -> Result<Vec<Self>, rusqlite::Error> {
        let mut stmt = connection.prepare(&(String::from("SELECT ") + &DEFAULT_COLUMNS.join(", ") + " FROM " + TABLE_NAME))?;
        let rows = stmt.query_map(
            [],
            Self::from_row
        )?;
        let mut result = Vec::new();
        for row in rows {
            result.push(row?);
        }
        Ok(result)
    }
}
core/src/data/local/authorization/request.rs (new file, 8 lines)

@@ -0,0 +1,8 @@
use iroh::NodeId;

/// Request of node authentication.
#[derive(Debug, Clone)]
pub struct AuthorizationRequest {
    sender_id: NodeId,
    sender_info: String,
}
core/src/data/local/authorization/response.rs (new file, 12 lines)

@@ -0,0 +1,12 @@
use iroh::NodeId;

/// Response of node authentication.
#[derive(Debug, Clone)]
pub struct AuthorizationResponse {
    sender_id: NodeId,
    passcode: String,
}
core/src/data/local/migration/mod.rs (new file, 14 lines)

@@ -0,0 +1,14 @@
mod v1;

use rusqlite::{Error, Connection};
use tracing::{event, Level};

pub fn migrate(con: &mut Connection) -> Result<(), Error> {
    let version: u32 = con.pragma_query_value(None, "user_version", |row| row.get(0)).expect("Failed to get user_version");
    if version < 1 {
        event!(Level::INFO, "Migrate local db to version 1");
        v1::migrate(con)?;
        event!(Level::INFO, "Migration done.");
    }
    Ok(())
}
core/src/data/local/migration/v1.rs (new file, 27 lines)

@@ -0,0 +1,27 @@
use rusqlite::{Error, Connection};

pub fn migrate(con: &mut Connection) -> Result<(), Error> {
    let tx = con.transaction()?;
    tx.execute_batch(
        "CREATE TABLE authorized_peer (
            id INTEGER PRIMARY KEY,
            node_id BLOB NOT NULL UNIQUE,
            last_synced_at TEXT,
            last_sent_version_vector BLOB,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL
        );
        CREATE TABLE authorization (
            id INTEGER PRIMARY KEY,
            node_id BLOB UNIQUE NOT NULL,
            passcode TEXT NOT NULL,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL
        );",
    )?;
    tx.pragma_update(None, "user_version", 1)?;
    tx.commit()?;
    Ok(())
}
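A call site for this migration entry point could look like the sketch below (the helper name is hypothetical; the path would come from `StorageConfig::get_local_database_path()` added elsewhere in this compare):

```rust
use rusqlite::Connection;

// Hypothetical helper: open the local database and bring its schema up to date.
fn open_local_db(path: &std::path::Path) -> Result<Connection, rusqlite::Error> {
    let mut con = Connection::open(path)?;
    migration::migrate(&mut con)?; // no-op once user_version >= 1
    Ok(con)
}
```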
core/src/data/local/mod.rs (new file, 27 lines)

@@ -0,0 +1,27 @@
mod authorization;
pub mod migration;

use std::{cell::OnceCell, iter::Map, path::Path, sync::{LazyLock, OnceLock}};

use migration::migrate;
use rusqlite::{ffi::Error, Connection, MappedRows, Row};

use crate::{config::StorageConfig, global::{CONFIG, LOCAL_DATABASE_CONNECTION}};

pub use authorization::*;

pub trait RusqliteRecord: Sized {
    fn insert(&self, connection: &Connection) -> Result<(), rusqlite::Error>;
    fn from_row(row: &Row<'_>) -> Result<Self, rusqlite::Error>;
    fn get_all(connection: &Connection) -> Result<Vec<Self>, rusqlite::Error>;
}

pub trait LocalRecord: RusqliteRecord {
    fn insert_global(&self) -> Result<(), rusqlite::Error> {
        self.insert(&LOCAL_DATABASE_CONNECTION.get_unchecked())
    }
    fn get_all_global() -> Result<Vec<Self>, rusqlite::Error> {
        let connection = LOCAL_DATABASE_CONNECTION.get_unchecked();
        Self::get_all(&connection)
    }
}
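To illustrate the trait contract, a hypothetical record type might implement it as below (the `Note` type, its table, and its columns are illustrative only, not part of this compare):

```rust
use rusqlite::{Connection, Row};

// Hypothetical record used only to show the RusqliteRecord contract.
struct Note {
    body: String,
}

impl RusqliteRecord for Note {
    fn insert(&self, connection: &Connection) -> Result<(), rusqlite::Error> {
        connection.execute("INSERT INTO note (body) VALUES (?1)", (&self.body,))?;
        Ok(())
    }
    fn from_row(row: &Row<'_>) -> Result<Self, rusqlite::Error> {
        Ok(Self { body: row.get(0)? })
    }
    fn get_all(connection: &Connection) -> Result<Vec<Self>, rusqlite::Error> {
        let mut stmt = connection.prepare("SELECT body FROM note")?;
        stmt.query_map([], Self::from_row)?.collect()
    }
}

// LocalRecord's provided methods then route through the process-wide
// LOCAL_DATABASE_CONNECTION, e.g. note.insert_global().
impl LocalRecord for Note {}
```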
@@ -1,98 +0,0 @@
use sea_orm_migration::{prelude::*, schema::*};

use crate::migration::TableMigration;

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        TrustedNode::up(manager).await?;
        RecordDeletion::up(manager).await?;
        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        TrustedNode::down(manager).await?;
        RecordDeletion::down(manager).await?;
        Ok(())
    }
}

#[derive(DeriveIden)]
enum TrustedNode {
    Table,
    Id,
    CreatedAt,
    UpdatedAt,
    SyncedAt,
    PeerId,
    Note,
    IsPrefered,
}

#[async_trait::async_trait]
impl TableMigration for TrustedNode {
    async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.create_table(
            Table::create()
                .table(Self::Table)
                .if_not_exists()
                .col(pk_uuid(Self::Id))
                .col(timestamp(Self::CreatedAt))
                .col(timestamp(Self::UpdatedAt))
                .col(timestamp_null(Self::SyncedAt))
                .col(string_len(Self::PeerId, 255))
                .col(text(Self::Note))
                .col(boolean(Self::IsPrefered))
                .to_owned()
        ).await?;
        Ok(())
    }
    async fn down<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.drop_table(Table::drop().table(Self::Table).to_owned()).await
    }
}

#[derive(DeriveIden, DeriveMigrationName)]
enum RecordDeletion {
    Table,
    Id,
    CreatedAt,
    CreatedBy,
    TableName,
    RecordId,
}

static FK_RECORD_DELETION_TRUSTED_NODE: &str = "fk_record_deletion_trusted_node";

#[async_trait::async_trait]
impl TableMigration for RecordDeletion {
    async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.create_table(
            Table::create()
                .table(Self::Table)
                .if_not_exists()
                .col(pk_uuid(Self::Id))
                .col(timestamp_with_time_zone(Self::CreatedAt))
                .col(uuid(Self::CreatedBy))
                .foreign_key(ForeignKey::create()
                    .name(FK_RECORD_DELETION_TRUSTED_NODE)
                    .from(Self::Table, Self::CreatedBy)
                    .to(TrustedNode::Table, TrustedNode::Id)
                    .on_delete(ForeignKeyAction::Cascade)
                    .on_update(ForeignKeyAction::Cascade)
                )
                .col(string(Self::TableName))
                .col(uuid(Self::RecordId))
                .to_owned()
        ).await?;
        Ok(())
    }
    async fn down<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
        manager.drop_table(Table::drop().table(Self::Table).to_owned()).await
    }
}
@@ -1,14 +0,0 @@
use sea_orm_migration::prelude::*;

pub mod m20220101_000001_create_main_tables;

#[cfg(any(test, feature="test"))]
pub struct DataMigrator;

#[cfg(any(test, feature="test"))]
#[async_trait::async_trait]
impl MigratorTrait for DataMigrator {
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![Box::new(m20220101_000001_create_main_tables::Migration)]
    }
}
@@ -1,4 +1,2 @@
-pub mod entity;
-pub mod migration;
-pub mod syncable;
-pub mod value;
+pub mod distributed;
+pub mod local;
@@ -1,79 +0,0 @@
use sea_orm::{prelude::*, query::*, sea_query::SimpleExpr, *};
#[cfg(feature="macros")]
pub use caretta_sync_macros::SyncableModel;

pub trait SyncableModel: ModelTrait<Entity = Self::SyncableEntity> {
    type SyncableEntity: SyncableEntity<SyncableModel = Self>;
    fn get_timestamp(&self) -> DateTimeUtc;
    fn get_id(&self) -> Uuid;
    fn get_author_id(&self) -> Uuid;
}

pub trait SyncableEntity: EntityTrait<
    Model = Self::SyncableModel,
    ActiveModel = Self::SyncableActiveModel,
    Column = Self::SyncableColumn,
> {
    type SyncableModel: SyncableModel<SyncableEntity = Self> + FromQueryResult;
    type SyncableActiveModel: SyncableActiveModel<SyncableEntity = Self>;
    type SyncableColumn: SyncableColumn;

    async fn get_updated(from: DateTimeUtc, db: &DatabaseConnection) -> Result<Vec<<Self as EntityTrait>::Model>, SyncableError> {
        let result: Vec<Self::SyncableModel> = <Self as EntityTrait>::find()
            .filter(Self::SyncableColumn::timestamp_after(from))
            .all(db)
            .await.unwrap();
        Ok(result)
    }
    async fn get_updated_by(author: Uuid, from: DateTimeUtc, db: &DatabaseConnection) -> Result<Vec<<Self as EntityTrait>::Model>, SyncableError> {
        let result: Vec<Self::SyncableModel> = <Self as EntityTrait>::find()
            .filter(Self::SyncableColumn::timestamp_after(from))
            .filter(Self::SyncableColumn::author_id_eq(author))
            .all(db)
            .await.unwrap();
        Ok(result)
    }
    fn apply_updated(models: Vec<<Self as EntityTrait>::Model>, db: &DatabaseConnection) {
        todo!()
    }
}

pub trait SyncableActiveModel: ActiveModelTrait<Entity = Self::SyncableEntity> {
    type SyncableEntity: SyncableEntity<SyncableActiveModel = Self>;
    fn get_id(&self) -> Option<Uuid>;
    fn get_timestamp(&self) -> Option<DateTimeUtc>;
    fn get_author_id(&self) -> Option<Uuid>;
    fn try_merge(&mut self, other: <Self::SyncableEntity as SyncableEntity>::SyncableModel) -> Result<(), SyncableError> {
        if self.get_id().ok_or(SyncableError::MissingField("uuid"))? != other.get_id() {
            return Err(SyncableError::MismatchUuid)
        }
        if self.get_timestamp().ok_or(SyncableError::MissingField("updated_at"))? < other.get_timestamp() {
            for column in <<<Self as ActiveModelTrait>::Entity as EntityTrait>::Column as Iterable>::iter() {
                if column.should_synced() {
                    self.take(column).set_if_not_equals(other.get(column));
                }
            }
        }
        Ok(())
    }
}

pub trait SyncableColumn: ColumnTrait {
    fn is_id(&self) -> bool;
    fn is_timestamp(&self) -> bool;
    fn should_synced(&self) -> bool;
    fn timestamp_after(from: DateTimeUtc) -> SimpleExpr;
    fn author_id_eq(author_id: Uuid) -> SimpleExpr;
    fn is_author_id(&self) -> bool;
}

#[derive(Debug, thiserror::Error)]
pub enum SyncableError {
    #[error("Invalid UUID")]
    MismatchUuid,
    #[error("mandatory field {0} is missing")]
    MissingField(&'static str),
}
@@ -1,5 +0,0 @@
mod multiaddr;
mod peer_id;

pub use multiaddr::MultiaddrValue;
pub use peer_id::PeerIdValue;
@@ -1,68 +0,0 @@
use std::str::FromStr;

use libp2p::Multiaddr;
use sea_orm::{sea_query::ValueTypeErr, DbErr};
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct MultiaddrValue(Multiaddr);

impl From<Multiaddr> for MultiaddrValue {
    fn from(source: Multiaddr) -> Self {
        Self(source)
    }
}

impl From<MultiaddrValue> for Multiaddr {
    fn from(source: MultiaddrValue) -> Self {
        source.0
    }
}

impl From<MultiaddrValue> for sea_orm::Value {
    fn from(value: MultiaddrValue) -> Self {
        Self::from(value.0.to_string())
    }
}

impl sea_orm::TryGetable for MultiaddrValue {
    fn try_get_by<I: sea_orm::ColIdx>(res: &sea_orm::QueryResult, idx: I)
        -> std::result::Result<Self, sea_orm::TryGetError> {
        match <String as sea_orm::TryGetable>::try_get_by(res, idx) {
            Ok(x) => match Multiaddr::from_str(&x) {
                Ok(y) => Ok(Self(y)),
                Err(_) => Err(DbErr::Type("Multiaddr".to_string()).into()),
            },
            Err(x) => Err(x),
        }
    }
}

impl sea_orm::sea_query::ValueType for MultiaddrValue {
    fn try_from(v: sea_orm::Value) -> std::result::Result<Self, sea_orm::sea_query::ValueTypeErr> {
        match <String as sea_orm::sea_query::ValueType>::try_from(v) {
            Ok(x) => match Multiaddr::from_str(&x) {
                Ok(y) => Ok(Self(y)),
                Err(_) => Err(ValueTypeErr{}),
            },
            Err(e) => Err(e)
        }
    }

    fn type_name() -> std::string::String {
        stringify!(MultiaddrValue).to_owned()
    }

    fn array_type() -> sea_orm::sea_query::ArrayType {
        sea_orm::sea_query::ArrayType::String
    }

    fn column_type() -> sea_orm::sea_query::ColumnType {
        sea_orm::sea_query::ColumnType::Text
    }
}

impl sea_orm::sea_query::Nullable for MultiaddrValue {
    fn null() -> sea_orm::Value {
        <String as sea_orm::sea_query::Nullable>::null()
    }
}
@ -1,101 +0,0 @@
use std::str::FromStr;

use libp2p::PeerId;
use sea_orm::{sea_query::ValueTypeErr, DbErr};
use serde::{Deserialize, Serialize};

use crate::error::Error;

#[derive(Clone, Debug, PartialEq)]
pub struct PeerIdValue(PeerId);

impl<'de> Deserialize<'de> for PeerIdValue {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        Self::from_str(&String::deserialize(deserializer)?)
            .or(Err(<D::Error as serde::de::Error>::custom("failed to parse PeerId")))
    }
}

impl Serialize for PeerIdValue {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(&self.0.to_string())
    }
}

impl FromStr for PeerIdValue {
    type Err = libp2p::identity::ParseError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self(PeerId::from_str(s)?))
    }
}

impl ToString for PeerIdValue {
    fn to_string(&self) -> String {
        self.0.to_string()
    }
}

impl From<PeerId> for PeerIdValue {
    fn from(source: PeerId) -> Self {
        Self(source)
    }
}

impl From<PeerIdValue> for PeerId {
    fn from(source: PeerIdValue) -> Self {
        source.0
    }
}

impl From<PeerIdValue> for sea_orm::Value {
    fn from(value: PeerIdValue) -> Self {
        Self::from(value.0.to_string())
    }
}

impl sea_orm::TryGetable for PeerIdValue {
    fn try_get_by<I: sea_orm::ColIdx>(res: &sea_orm::QueryResult, idx: I) -> std::result::Result<Self, sea_orm::TryGetError> {
        match <String as sea_orm::TryGetable>::try_get_by(res, idx) {
            Ok(x) => match PeerId::from_str(&x) {
                Ok(y) => Ok(Self(y)),
                Err(_) => Err(DbErr::Type("PeerId".to_string()).into()),
            },
            Err(x) => Err(x),
        }
    }
}

impl sea_orm::sea_query::ValueType for PeerIdValue {
    fn try_from(v: sea_orm::Value) -> std::result::Result<Self, sea_orm::sea_query::ValueTypeErr> {
        match <String as sea_orm::sea_query::ValueType>::try_from(v) {
            Ok(x) => match PeerId::from_str(&x) {
                Ok(y) => Ok(Self(y)),
                Err(_) => Err(ValueTypeErr {}),
            },
            Err(e) => Err(e),
        }
    }

    fn type_name() -> std::string::String {
        stringify!(PeerIdValue).to_owned()
    }

    fn array_type() -> sea_orm::sea_query::ArrayType {
        sea_orm::sea_query::ArrayType::String
    }

    fn column_type() -> sea_orm::sea_query::ColumnType {
        sea_orm::sea_query::ColumnType::Text
    }
}

impl sea_orm::sea_query::Nullable for PeerIdValue {
    fn null() -> sea_orm::Value {
        <String as sea_orm::sea_query::Nullable>::null()
    }
}
@ -1,4 +1,4 @@
use std::ffi::OsString;
use std::{array::TryFromSliceError, ffi::OsString};

#[derive(thiserror::Error, Debug)]
pub enum Error {
@ -10,33 +10,27 @@ pub enum Error {
    CiborSerialize(#[from] ciborium::ser::Error<std::io::Error>),
    #[error("Config error: {0}")]
    Config(#[from] crate::config::error::ConfigError),
    #[error("DB Error: {0}")]
    Db(#[from] sea_orm::DbErr),
    #[error("Dial Error: {0}")]
    Dial(#[from] libp2p::swarm::DialError),
    #[error("Decoding identity error: {0}")]
    IdentityDecoding(#[from] libp2p::identity::DecodingError),
    #[error("Infallible: {0}")]
    Infallible(#[from] std::convert::Infallible),
    #[error("IO Error: {0}")]
    Io(#[from] std::io::Error),
    #[error("Iroh bind error: {0}")]
    IrohBind(#[from] iroh::endpoint::BindError),
    #[error("mandatory config `{0}` is missing")]
    MissingConfig(&'static str),
    #[error("Multiaddr error: {0}")]
    Multiaddr(#[from] libp2p::multiaddr::Error),
    #[error("Noise error: {0}")]
    Noise(#[from] libp2p::noise::Error),
    #[error("Parse OsString error: {0:?}")]
    OsStringConvert(std::ffi::OsString),
    #[cfg(feature="cli")]
    #[error("Parse args error: {0}")]
    ParseCommand(#[from] clap::Error),
    #[error("Signature error: {0}")]
    Signature(#[from] ed25519_dalek::SignatureError),
    #[error("slice parse error: {0}")]
    SliceTryFrom(#[from] TryFromSliceError),
    #[error("toml deserialization error: {0}")]
    TomlDe(#[from] toml::de::Error),
    #[error("toml serialization error: {0}")]
    TomlSer(#[from] toml::ser::Error),
    #[error("Transport error: {0}")]
    Transport(#[from] libp2p::TransportError<std::io::Error>)
}

impl From<std::ffi::OsString> for Error {
@ -2,7 +2,7 @@
use tempfile::TempDir;
use tokio::sync::OnceCell;

use crate::{config::{Config, ConfigError, PartialP2pConfig, PartialRpcConfig, PartialStorageConfig, StorageConfig}, error::Error};
use crate::{config::{Config, ConfigError, PartialIrohConfig, PartialRpcConfig, PartialStorageConfig, StorageConfig}, error::Error};

pub static CONFIG: GlobalConfig = GlobalConfig::const_new();
pub struct GlobalConfig {
@ -1,121 +0,0 @@
use std::path::{Path, PathBuf};

use dirs::cache_dir;
use sea_orm::{ConnectOptions, Database, DbErr, DatabaseConnection};
use sea_orm_migration::MigratorTrait;
use crate::{cache::migration::CacheMigrator, config::StorageConfig, error::Error};
use tokio::sync::OnceCell;

pub static DATABASE_CONNECTIONS: GlobalDatabaseConnections = GlobalDatabaseConnections::const_new();

pub struct DatabaseConnections<'a> {
    pub data: &'a DatabaseConnection,
    pub cache: &'a DatabaseConnection
}

pub struct GlobalDatabaseConnections {
    data: OnceCell<DatabaseConnection>,
    cache: OnceCell<DatabaseConnection>,
}

impl GlobalDatabaseConnections {
    pub const fn const_new() -> Self {
        Self {
            data: OnceCell::const_new(),
            cache: OnceCell::const_new()
        }
    }

    pub fn get_data(&'static self) -> Option<&'static DatabaseConnection> {
        self.data.get()
    }

    pub fn get_data_unchecked(&'static self) -> &'static DatabaseConnection {
        self.get_data().expect("Global data database connection should be initialized before access!")
    }

    pub fn get_cache(&'static self) -> Option<&'static DatabaseConnection> {
        self.cache.get()
    }

    pub fn get_cache_unchecked(&'static self) -> &'static DatabaseConnection {
        self.get_cache().expect("Global cache database connection should be initialized before access!")
    }

    fn get_data_file_path<T>(config: &T) -> PathBuf
    where
        T: AsRef<StorageConfig>
    {
        config.as_ref().data_directory.join("data.sqlite")
    }

    fn get_cache_file_path<T>(config: &T) -> PathBuf
    where
        T: AsRef<StorageConfig>
    {
        config.as_ref().cache_directory.join("cache.sqlite")
    }

    fn get_url_unchecked<T>(path: T) -> String
    where
        T: AsRef<Path>
    {
        "sqlite://".to_string() + path.as_ref().as_os_str().to_str().expect("Failed to convert path to string!") + "?mode=rwc"
    }

    async fn get_or_init_database_connection_unchecked<T, U>(cell: &OnceCell<DatabaseConnection>, options: T, _: U) -> &DatabaseConnection
    where
        T: Into<ConnectOptions>,
        U: MigratorTrait
    {
        cell.get_or_init(|| async {
            let db = Database::connect(options.into()).await.unwrap();
            U::up(&db, None).await.unwrap();
            db
        }).await
    }

    pub async fn get_or_init_unchecked<T, U>(&'static self, config: T, _migrator: U) -> DatabaseConnections
    where
        T: AsRef<StorageConfig>,
        U: MigratorTrait,
    {
        let data_path = Self::get_data_file_path(&config);
        if let Some(x) = data_path.parent() {
            std::fs::create_dir_all(x).expect("Failed to create directory for data database");
        }
        let cache_path = Self::get_cache_file_path(&config);
        if let Some(x) = cache_path.parent() {
            std::fs::create_dir_all(x).expect("Failed to create directory for cache database");
        }
        DatabaseConnections {
            data: Self::get_or_init_database_connection_unchecked(
                &self.data,
                Self::get_url_unchecked(data_path),
                _migrator
            ).await,
            cache: Self::get_or_init_database_connection_unchecked(
                &self.cache,
                Self::get_url_unchecked(cache_path),
                CacheMigrator
            ).await,
        }
    }
}

#[cfg(test)]
pub use tests::*;

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{cache::migration::CacheMigrator, data::migration::DataMigrator, global::CONFIG, tests::TEST_CONFIG};

    #[tokio::test]
    pub async fn get_or_init_database() {
        DATABASE_CONNECTIONS.get_or_init_unchecked(&*TEST_CONFIG, DataMigrator).await;
    }
}
27
core/src/global/iroh_endpoint.rs
Normal file

@ -0,0 +1,27 @@
use std::sync::OnceLock;

use iroh::Endpoint;

pub static IROH_ENDPOINT: GlobalIrohEndpoint = GlobalIrohEndpoint::const_new();

pub struct GlobalIrohEndpoint {
    inner: OnceLock<Endpoint>
}

impl GlobalIrohEndpoint {
    const fn const_new() -> Self {
        Self {
            inner: OnceLock::new()
        }
    }
    pub fn get_or_init(&self, endpoint: &Endpoint) -> Endpoint {
        self.inner.get_or_init(|| endpoint.clone()).clone()
    }
    pub fn get(&self) -> Option<Endpoint> {
        self.inner.get().map(|x| x.clone())
    }
    pub fn get_unchecked(&self) -> Endpoint {
        self.get().expect("Global Iroh Endpoint must be initialized before use")
    }
}
49
core/src/global/local_database_connection.rs
Normal file

@ -0,0 +1,49 @@
use std::{fs::create_dir_all, path::{Path, PathBuf}, sync::OnceLock};

use rusqlite::Connection;

use crate::{data::local::migration::migrate, error::Error};

pub static LOCAL_DATABASE_CONNECTION: GlobalLocalDatabaseConnection = GlobalLocalDatabaseConnection::const_new();

pub struct GlobalLocalDatabaseConnection {
    path: OnceLock<PathBuf>
}

fn path_to_connection_or_panic<P>(path: &P) -> Connection
where
    P: AsRef<Path>
{
    Connection::open(path.as_ref()).expect("Failed to open database connection for local data")
}

impl GlobalLocalDatabaseConnection {
    const fn const_new() -> Self {
        Self {
            path: OnceLock::new()
        }
    }

    pub fn get_or_init<P>(&self, path: &P) -> Connection
    where
        P: AsRef<Path>,
    {
        path_to_connection_or_panic(self.path.get_or_init(|| {
            let path = path.as_ref();
            let parent = path.parent().expect("Database path should have a parent directory");
            create_dir_all(parent).expect("Failed to create parent directory of database");
            let mut conn = path_to_connection_or_panic(&path);
            migrate(&mut conn).expect("Local database migration should complete correctly");
            path.to_path_buf()
        }))
    }

    pub fn get(&self) -> Option<Connection> {
        self.path.get().map(|path| {
            path_to_connection_or_panic(path)
        })
    }
    pub fn get_unchecked(&self) -> Connection {
        self.get().expect("Global database for local data must be initialized before use")
    }
}
@ -1,24 +1,22 @@
use std::{any::type_name, collections::HashMap, net::{IpAddr, Ipv4Addr}, path::{Path, PathBuf}, sync::LazyLock};

use crate::{config::{P2pConfig, PartialP2pConfig, StorageConfig}, error::Error };
use libp2p::{swarm::SwarmEvent, Multiaddr, PeerId};
use sea_orm::{prelude::*, Database};
use sea_orm_migration::MigratorTrait;
use crate::{config::{StorageConfig}, error::Error };
use tokio::sync::{OnceCell, RwLock, RwLockReadGuard, RwLockWriteGuard};

mod config;
mod iroh_endpoint;
mod local_database_connection;

pub use config::*;
mod database_connection;
pub use database_connection::*;
pub use iroh_endpoint::*;
pub use local_database_connection::*;
use uuid::{ContextV7, Timestamp, Uuid};

pub fn generate_uuid() -> Uuid {
    Uuid::new_v7(Timestamp::now(ContextV7::new()))
}

pub static DEFAULT_LISTEN_IPS: &[IpAddr] = &[IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))];

fn uninitialized_message<T>(var: T) -> String {
    format!("{} is uninitialized!", &stringify!(var))
}
@ -1,13 +1,8 @@
pub mod cache;
pub mod config;
pub mod data;
pub mod error;
pub mod global;
pub mod message;
pub mod migration;
pub mod p2p;
pub mod proto;
pub mod rpc;
#[cfg(any(test, feature="test"))]
pub mod tests;
pub mod utils;
@ -1,52 +0,0 @@
mod node;
use serde::{de::DeserializeOwned, Serialize};
use uuid::Uuid;

use crate::{utils::async_convert::{AsyncTryFrom, AsyncTryInto}, error::Error};

pub trait Message: DeserializeOwned + Sized + Serialize {
    fn into_writer<W: std::io::Write>(&self, writer: W) -> Result<(), ciborium::ser::Error<std::io::Error>> {
        ciborium::into_writer(self, writer)
    }
    fn into_vec_u8(&self) -> Result<Vec<u8>, ciborium::ser::Error<std::io::Error>> {
        let mut buf: Vec<u8> = Vec::new();
        self.into_writer(&mut buf)?;
        Ok(buf)
    }
    fn from_reader<R: std::io::Read>(reader: R) -> Result<Self, ciborium::de::Error<std::io::Error>> {
        ciborium::from_reader(reader)
    }
}

pub trait Request<T>: Into<T> + From<T> + AsyncTryInto<Self::Response>
where T: Message {
    type Response: Response<T, Request = Self>;
    async fn send_p2p(self) -> Result<Self::Response, Error>;
}

pub trait Response<T>: Into<T> + From<T> + AsyncTryFrom<Self::Request>
where T: Message {
    type Request: Request<T, Response = Self>;
    async fn from_request_with_local(req: Self::Request) -> Result<Self, Error>;
    async fn from_request_with_p2p(req: Self::Request) -> Result<Self, Error> {
        todo!()
    }
}

pub trait FromDatabase {
    async fn from_storage();
}

pub trait P2pRequest<T>: Into<T> + From<T>
where T: Message {
    type P2pResponse: P2pResponse<T, P2pRequest = Self>;
    async fn send_p2p(&self) -> Result<Self::P2pResponse, crate::p2p::error::P2pError> {
        todo!()
    }
}
pub trait P2pResponse<T>: Into<T> + From<T> + AsyncTryFrom<Self::P2pRequest>
where T: Message {
    type P2pRequest: P2pRequest<T, P2pResponse = Self>;
    async fn try_from_p2p_request(source: Self::P2pRequest) -> Result<Self, crate::p2p::error::P2pError>;
}
@ -1,10 +0,0 @@
use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, Serialize)]
pub struct ListTrustedNodeRequest;

#[derive(Debug, Deserialize, Serialize)]
pub struct ListTrustedNodeResponse {
    node: Vec<crate::data::entity::TrustedNodeModel>
}
@ -1,7 +0,0 @@
use sea_orm_migration::{prelude::*, schema::*};

#[async_trait::async_trait]
pub trait TableMigration {
    async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr>;
    async fn down<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr>;
}
@ -1,4 +0,0 @@
#[derive(Debug, thiserror::Error)]
pub enum P2pError {
}
@ -1,104 +0,0 @@
pub mod error;
use chrono::Local;
use libp2p::{identity::Keypair, mdns, ping, swarm, Multiaddr, PeerId};
use sea_orm::{prelude::DateTimeUtc, ActiveModelTrait, ActiveValue::Set, ColumnTrait, EntityTrait, ModelTrait, QueryFilter};
use tracing::{event, Level};

use crate::{cache::entity::{CachedPeerActiveModel, CachedAddressActiveModel, CachedAddressColumn, CachedAddressEntity, CachedAddressModel, CachedPeerColumn, CachedPeerEntity, CachedPeerModel}, data::value::{MultiaddrValue, PeerIdValue}, error::Error, global::DATABASE_CONNECTIONS};

#[derive(swarm::NetworkBehaviour)]
#[behaviour(to_swarm = "Event")]
pub struct Behaviour {
    pub mdns: mdns::tokio::Behaviour,
    pub ping: ping::Behaviour,
}

impl TryFrom<&Keypair> for Behaviour {
    type Error = Error;
    fn try_from(keypair: &Keypair) -> Result<Self, Error> {
        Ok(Self {
            mdns: mdns::tokio::Behaviour::new(
                mdns::Config::default(),
                keypair.public().into(),
            )?,
            ping: libp2p::ping::Behaviour::new(ping::Config::new()),
        })
    }
}

#[derive(Debug)]
pub enum Event {
    Mdns(mdns::Event),
    Ping(ping::Event),
}

impl Event {
    pub async fn run(&self) {
        match self {
            Self::Mdns(x) => {
                match x {
                    mdns::Event::Discovered(e) => {
                        for peer in e.iter() {
                            event!(Level::TRACE, "Peer discovered via mdns: {}, {}", &peer.0, &peer.1);
                            match try_get_or_insert_cached_peer(&peer.0, &peer.1).await {
                                Ok(_) => {},
                                Err(e) => {
                                    event!(Level::WARN, "{:?}", e);
                                }
                            };
                        }
                    },
                    _ => {},
                }
            },
            _ => {}
        }
    }
}
impl From<mdns::Event> for Event {
    fn from(event: mdns::Event) -> Self {
        Self::Mdns(event)
    }
}
impl From<ping::Event> for Event {
    fn from(event: ping::Event) -> Self {
        Self::Ping(event)
    }
}

async fn try_get_or_insert_cached_peer(peer_id: &PeerId, peer_addr: &Multiaddr) -> Result<(CachedPeerModel, CachedAddressModel), Error> {
    match (
        CachedPeerEntity::find().filter(CachedPeerColumn::PeerId.eq(PeerIdValue::from(peer_id.clone()))).one(DATABASE_CONNECTIONS.get_cache_unchecked()).await?,
        CachedAddressEntity::find().filter(CachedAddressColumn::Multiaddress.eq(MultiaddrValue::from(peer_addr.clone()))).one(DATABASE_CONNECTIONS.get_cache_unchecked()).await?,
    ) {
        (Some(x), Some(y)) => {
            if x.id == y.cached_peer_id {
                event!(Level::TRACE, "Known peer: {}, {}", peer_id, peer_addr);
                let mut addr: CachedAddressActiveModel = y.into();
                addr.updated_at = Set(Local::now().to_utc());
                let updated = addr.update(DATABASE_CONNECTIONS.get_cache_unchecked()).await?;
                Ok((x, updated))
            } else {
                y.delete(DATABASE_CONNECTIONS.get_cache().expect("Cache database should be initialized beforehand!")).await?;
                Ok((x.clone(), CachedAddressActiveModel::new(x.id, peer_addr.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?))
            }
        }
        (Some(x), None) => {
            event!(Level::INFO, "New address {} for {}", peer_addr, peer_id);
            Ok((x.clone(), CachedAddressActiveModel::new(x.id, peer_addr.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?))
        },
        (None, x) => {
            event!(Level::INFO, "Add new peer: {}", peer_id);
            let inserted = CachedPeerActiveModel::new(peer_id.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?;
            if let Some(y) = x {
                event!(Level::INFO, "Remove {} from {}", peer_addr, peer_id);
                y.delete(DATABASE_CONNECTIONS.get_cache_unchecked()).await?;
            };
            event!(Level::INFO, "Add address {} to {}", peer_addr, peer_id);
            Ok((inserted.clone(), CachedAddressActiveModel::new(inserted.id, peer_addr.clone()).insert(DATABASE_CONNECTIONS.get_cache_unchecked()).await?))
        },
    }
}
@ -1,16 +0,0 @@
use libp2p::Multiaddr;

use crate::cache::entity::CachedAddressModel;
use crate::utils::utc_to_timestamp;
use crate::proto::CachedAddressMessage;

impl From<&CachedAddressModel> for CachedAddressMessage {
    fn from(a: &CachedAddressModel) -> Self {
        Self {
            number: a.id,
            created_at: Some(utc_to_timestamp(&a.created_at)),
            updated_at: Some(utc_to_timestamp(&a.updated_at)),
            multiaddress: Multiaddr::from(a.multiaddress.clone()).to_string(),
        }
    }
}
@ -1,14 +0,0 @@
use crate::{cache::entity::{CachedAddressModel, CachedPeerModel}, proto::{CachedAddressMessage, CachedPeerMessage}, utils::utc_to_timestamp};

impl From<(&CachedPeerModel, &Vec<CachedAddressModel>)> for CachedPeerMessage {
    fn from(source: (&CachedPeerModel, &Vec<CachedAddressModel>)) -> Self {
        let (peer, addresses) = source;

        Self {
            number: peer.id,
            peer_id: peer.peer_id.to_string(),
            created_at: Some(utc_to_timestamp(&peer.created_at)),
            addresses: addresses.iter().map(|x| CachedAddressMessage::from(x)).collect(),
        }
    }
}
17
core/src/proto/convert/direct_addr_info_message.rs
Normal file

@ -0,0 +1,17 @@
use iroh::endpoint::DirectAddrInfo;

use crate::proto::{error::ProtoSerializeError, DirectAddrInfoMessage, LastControlMessage, SourceMessage};

impl TryFrom<DirectAddrInfo> for DirectAddrInfoMessage {
    type Error = ProtoSerializeError;
    fn try_from(value: DirectAddrInfo) -> Result<Self, Self::Error> {
        Ok(DirectAddrInfoMessage {
            addr: value.addr.to_string(),
            latency: value.latency.map(|x| x.try_into()).transpose()?,
            last_control: value.last_control.map(|x| LastControlMessage::try_from(x)).transpose()?,
            last_payload: value.last_payload.map(|x| x.try_into()).transpose()?,
            last_alive: value.last_alive.map(|x| x.try_into()).transpose()?,
            sources: value.sources.into_iter().map(|x| SourceMessage::try_from(x)).collect::<Result<Vec<SourceMessage>, Self::Error>>()?
        })
    }
}
16
core/src/proto/convert/last_control_message.rs
Normal file

@ -0,0 +1,16 @@
use std::time::Duration;

use iroh::endpoint::ControlMsg;
use prost_types::DurationError;

use crate::proto::{error::ProtoSerializeError, LastControlMessage};

impl TryFrom<(Duration, ControlMsg)> for LastControlMessage {
    type Error = ProtoSerializeError;
    fn try_from(value: (Duration, ControlMsg)) -> Result<Self, Self::Error> {
        Ok(LastControlMessage {
            duration: Some(value.0.try_into()?),
            control_msg: value.1.to_string()
        })
    }
}
8
core/src/proto/convert/mod.rs
Normal file

@ -0,0 +1,8 @@
mod direct_addr_info_message;
mod last_control_message;
mod node_id_message;
mod remote_info_iter_request;
mod remote_info_message;
mod remote_info_request;
mod remote_info_response;
mod source_message;
17
core/src/proto/convert/node_id_message.rs
Normal file

@ -0,0 +1,17 @@
use iroh::NodeId;

use crate::proto::{error::{ProtoDeserializeError, ProtoSerializeError}, NodeIdMessage};

impl From<NodeId> for NodeIdMessage {
    fn from(value: NodeId) -> Self {
        NodeIdMessage { node_id: Vec::from(value.as_bytes()) }
    }
}

impl TryFrom<NodeIdMessage> for NodeId {
    type Error = ProtoDeserializeError;
    fn try_from(value: NodeIdMessage) -> Result<Self, Self::Error> {
        let slice: [u8; 32] = value.node_id[0..32].try_into()?;
        Ok(NodeId::from_bytes(&slice)?)
    }
}
7
core/src/proto/convert/remote_info_iter_request.rs
Normal file

@ -0,0 +1,7 @@
use crate::proto::RemoteInfoIterRequest;

impl RemoteInfoIterRequest {
    pub fn new() -> Self {
        Self {}
    }
}
19
core/src/proto/convert/remote_info_message.rs
Normal file

@ -0,0 +1,19 @@
use iroh::endpoint::RemoteInfo;

use crate::{error::Error, proto::{error::ProtoSerializeError, DirectAddrInfoMessage, RemoteInfoMessage}};

impl TryFrom<RemoteInfo> for RemoteInfoMessage {
    type Error = ProtoSerializeError;
    fn try_from(value: RemoteInfo) -> Result<Self, Self::Error> {
        Ok(Self {
            node_id: Some(value.node_id.into()),
            relay_url: value.relay_url.map_or(String::from(""), |x| x.relay_url.to_string()),
            addrs: value.addrs.into_iter()
                .map(|x| DirectAddrInfoMessage::try_from(x))
                .collect::<Result<Vec<DirectAddrInfoMessage>, Self::Error>>()?,
            conn_type: value.conn_type.to_string(),
            latency: value.latency.map(|x| x.try_into()).transpose()?,
            last_used: value.last_used.map(|x| x.try_into()).transpose()?,
        })
    }
}
11
core/src/proto/convert/remote_info_request.rs
Normal file

@ -0,0 +1,11 @@
use iroh::NodeId;

use crate::proto::{error::ProtoDeserializeError, NodeIdMessage, RemoteInfoRequest};

impl From<NodeIdMessage> for RemoteInfoRequest {
    fn from(value: NodeIdMessage) -> Self {
        Self {
            node_id: Some(value)
        }
    }
}
16
core/src/proto/convert/remote_info_response.rs
Normal file

@ -0,0 +1,16 @@
use crate::proto::{RemoteInfoMessage, RemoteInfoResponse};

impl From<RemoteInfoMessage> for RemoteInfoResponse {
    fn from(value: RemoteInfoMessage) -> Self {
        Self {
            remote_info: Some(value)
        }
    }
}
impl From<Option<RemoteInfoMessage>> for RemoteInfoResponse {
    fn from(value: Option<RemoteInfoMessage>) -> Self {
        Self {
            remote_info: value,
        }
    }
}
16
core/src/proto/convert/source_message.rs
Normal file

@ -0,0 +1,16 @@
use std::time::Duration;

use iroh::endpoint::Source;

use crate::{error::Error, proto::{error::ProtoSerializeError, SourceMessage}};

impl TryFrom<(Source, Duration)> for SourceMessage {
    type Error = ProtoSerializeError;
    fn try_from(src: (Source, Duration)) -> Result<Self, Self::Error> {
        let (source, duration) = src;
        Ok(Self {
            source: source.to_string(),
            duration: Some(duration.try_into()?),
        })
    }
}
15
core/src/proto/error.rs
Normal file

@ -0,0 +1,15 @@
#[derive(thiserror::Error, Debug)]
pub enum ProtoSerializeError {
    #[error("Duration parse error: {0}")]
    Duration(#[from] prost_types::DurationError),
}

#[derive(thiserror::Error, Debug)]
pub enum ProtoDeserializeError {
    #[error("Missing field: {0}")]
    MissingField(&'static str),
    #[error("Signature error: {0}")]
    Signature(#[from] ed25519_dalek::SignatureError),
    #[error("slice parse error: {0}")]
    SliceTryFrom(#[from] std::array::TryFromSliceError),
}
@ -1,5 +1,5 @@
mod cached_address;
mod cached_peer;

tonic::include_proto!("caretta_sync");
mod convert;
mod error;
mod server;

tonic::include_proto!("caretta_sync");
34
core/src/proto/server.rs
Normal file

@ -0,0 +1,34 @@
use std::pin::Pin;

use iroh::{endpoint, Endpoint, NodeId};
use tonic::{Response, Request, Status};
use tokio_stream::Stream;

use crate::{global::IROH_ENDPOINT, proto::{error::ProtoDeserializeError, RemoteInfoIterRequest, RemoteInfoMessage, RemoteInfoRequest, RemoteInfoResponse}};

pub struct CarettaSyncServer {}

#[tonic::async_trait]
impl crate::proto::caretta_sync_server::CarettaSync for CarettaSyncServer {
    type RemoteInfoIterStream = Pin<Box<dyn Stream<Item = Result<RemoteInfoResponse, Status>> + Send>>;
    async fn remote_info(&self, request: Request<RemoteInfoRequest>) -> Result<Response<RemoteInfoResponse>, Status> {
        let node_id = NodeId::try_from(request.into_inner().node_id.ok_or(Status::from_error(Box::new(ProtoDeserializeError::MissingField("node_id"))))?)
            .map_err(|e| Status::from_error(Box::new(e)))?;
        let remote_info: Option<RemoteInfoMessage> = IROH_ENDPOINT.get_unchecked().remote_info(node_id)
            .map(|x| x.try_into())
            .transpose()
            .map_err(|e| Status::from_error(Box::new(e)))?;
        Ok(Response::new(RemoteInfoResponse::from(remote_info)))
    }
    async fn remote_info_iter(&self, _: Request<RemoteInfoIterRequest>) -> Result<Response<Self::RemoteInfoIterStream>, Status> {
        let iter = IROH_ENDPOINT.get_unchecked().remote_info_iter()
            .map(|x| {
                RemoteInfoMessage::try_from(x)
                    .map(|x| RemoteInfoResponse::from(x))
                    .map_err(|e| Status::from_error(Box::new(e)))
            });
        let stream = futures::stream::iter(iter);
        Ok(Response::new(Box::pin(stream)))
    }
}
@ -1,2 +0,0 @@
pub mod service;
@ -1,30 +0,0 @@
use crate::{cache::entity::{CachedAddressEntity, CachedPeerEntity, CachedPeerModel}, global::DATABASE_CONNECTIONS, proto::CachedAddressMessage};
use futures::future::join_all;
use tonic::{Request, Response, Status};

use crate::proto::{cached_peer_service_server::CachedPeerServiceServer, CachedPeerListRequest, CachedPeerListResponse, CachedPeerMessage};
use sea_orm::prelude::*;

#[derive(Debug, Default)]
pub struct CachedPeerService {}

#[tonic::async_trait]
impl crate::proto::cached_peer_service_server::CachedPeerService for CachedPeerService {
    async fn list(&self, request: Request<CachedPeerListRequest>) -> Result<Response<CachedPeerListResponse>, Status> {
        println!("Got a request: {:?}", request);

        let reply = CachedPeerListResponse {
            peers: join_all(CachedPeerEntity::find().all(DATABASE_CONNECTIONS.get_cache_unchecked()).await.map_err(|e| Status::from_error(Box::new(e)))?.iter().map(|x| async move {
                let addresses = CachedAddressEntity::find()
                    .all(DATABASE_CONNECTIONS.get_cache_unchecked())
                    .await
                    .map_err(|e| Status::from_error(Box::new(e)))?;
                Ok::<CachedPeerMessage, Status>(CachedPeerMessage::from((x, &addresses)))
            })).await.into_iter().collect::<Result<Vec<_>, _>>()?,
        };

        Ok(Response::new(reply))
    }
}
@ -1 +0,0 @@
pub mod cached_peer;
@ -1,13 +1,13 @@
use crate::{config::{Config, P2pConfig, RpcConfig}, error::Error};
use crate::{config::{Config, IrohConfig, RpcConfig}, error::Error};

pub trait ServerTrait {
    async fn serve_p2p<T>(config: &T) -> Result<(), Error>
    where T: AsRef<P2pConfig>;
    where T: AsRef<IrohConfig>;
    async fn serve_rpc<T>(config: &T) -> Result<(), Error>
    where T: AsRef<RpcConfig>;
    async fn serve_all<T>(config: &T) -> Result<(), Error>
    where
        T: AsRef<P2pConfig> + AsRef<RpcConfig> {
        T: AsRef<IrohConfig> + AsRef<RpcConfig> {
        tokio::try_join!(
            Self::serve_p2p(config),
            Self::serve_rpc(config)
@ -1,10 +1,8 @@
use std::{path::PathBuf, sync::LazyLock};

use sea_orm::{sea_query::{FromValueTuple, IntoValueTuple, ValueType}, ActiveModelBehavior, ActiveModelTrait, ColumnTrait, Condition, DatabaseConnection, EntityTrait, IntoActiveModel, ModelTrait, PrimaryKeyToColumn, PrimaryKeyTrait, Value};
use sea_orm::QueryFilter;
use tempfile::TempDir;
use url::Url;
use crate::{ config::{Config, PartialConfig, PartialP2pConfig, PartialRpcConfig, RpcConfig, StorageConfig}, message::Message};
use crate::{ config::{Config, PartialConfig, PartialIrohConfig, PartialRpcConfig, RpcConfig, StorageConfig}};

use serde::{de::DeserializeOwned, Deserialize, Serialize};
@ -15,7 +13,7 @@ pub static TEST_CONFIG: LazyLock<Config> = LazyLock::new(|| {

    Config {
        p2p: PartialP2pConfig::default().with_new_private_key().try_into().unwrap(),
        iroh: PartialIrohConfig::default().with_new_secret_key().try_into().unwrap(),
        storage: StorageConfig {
            data_directory: data_dir,
            cache_directory: cache_dir,
@ -9,7 +9,6 @@ repository.workspace = true
[dependencies]
bevy.workspace = true
caretta-sync = { path = "../..", features = ["bevy"] }
libp2p.workspace = true
tokio.workspace = true
tokio-stream = { version = "0.1.17", features = ["net"] }
tonic.workspace = true
@ -10,5 +10,4 @@ repository.workspace = true
clap.workspace = true
caretta-sync = { path = "../..", features = ["cli", "bevy", "test"] }
caretta-sync-example-core.path = "../core"
libp2p.workspace = true
tokio.workspace = true
13
id/Cargo.toml
Normal file

@ -0,0 +1,13 @@
[package]
name = "caretta-id"
edition.workspace = true
version = "0.1.0-alpha"
description.workspace = true
license.workspace = true
repository.workspace = true

[features]

[dependencies]
rand.workspace = true
thiserror.workspace = true
21
id/README.md
Normal file

@ -0,0 +1,21 @@
# Caretta ID

Random, user-friendly IDs for distributed systems that handle personal data.

## Examples

- `123` : shortest version
- `456-789` : default size, still user-friendly with sufficient randomness (for personal data)
- `abc-def-ghj` : long version; letters other than i, l, and o are also valid

## Specs

### Characters

Each three-character chunk is drawn from the 33-character set `0123456789abcdefghjkmnpqrstuvwxyz`. For readability, `i` and `l` are parsed as `1`, `o` is parsed as `0`, and uppercase letters are accepted as their lowercase equivalents. Chunks are joined with `-` or `_`, or written with no delimiter at all.

## Purpose

When I considered implementing IDs that users (not the internal system) can use to refer to items, similar to GitHub commit hashes or issue numbers, the following issues arose.

- Sequential numbers like Git issue numbers are difficult to implement in distributed systems because collisions are unavoidable.
- Random identifiers like UUIDs are too long for users.
- Short random identifiers like 7-digit commit hashes seem good, but they are not a standardized specification.

So I decided to write my own ID specification.
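A minimal usage sketch of the new `caretta-id` API, assembled from the doctests and trait impls added in this commit (`FromStr`, `Display`, the integer conversions, and the `Distribution` impls for `rand` 0.8); treat it as an illustration rather than shipped example code:

```rust
use std::str::FromStr;

use caretta_id::DoubleId;

fn main() {
    // Parse the default two-chunk form; '-' and '_' are both accepted,
    // and the delimiter may be omitted entirely.
    let id = DoubleId::from_str("456-789").unwrap();
    assert_eq!(id, DoubleId::from_str("456_789").unwrap());
    assert_eq!(id, DoubleId::from_str("456789").unwrap());

    // Ids round-trip through their integer representation.
    let n = u32::from(id.clone());
    assert_eq!(id, DoubleId::try_from(n).unwrap());

    // Display always renders the canonical '-'-delimited form.
    assert_eq!(id.to_string(), "456-789");

    // Draw a fresh random id via the Distribution<DoubleId> impl.
    let random: DoubleId = rand::random();
    println!("{}", random);
}
```

With 33 characters per position, a `DoubleId` covers 33^6 ≈ 1.29 × 10^9 values, which is the "sufficient randomness for personal data" trade-off described above.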
135
id/src/double.rs
Normal file

@ -0,0 +1,135 @@
use std::{fmt::Display, str::FromStr};

use rand::{distributions::Standard, prelude::Distribution, Rng};

use crate::{utils::is_delimiter, Error, Id, SingleId};

#[derive(Debug, Clone, PartialEq)]
pub struct DoubleId {
    inner: (SingleId, SingleId)
}

impl Id for DoubleId {
    type SizeType = u32;
    const SIZE: Self::SizeType = (SingleId::SIZE as u32).pow(2);
    /// ```
    /// use caretta_id::{Id, DoubleId};
    /// use std::str::FromStr;
    ///
    /// assert_eq!(DoubleId::NIL, DoubleId::from_str("000-000").unwrap());
    /// assert_eq!(DoubleId::NIL, DoubleId::try_from(0).unwrap());
    /// ```
    const NIL: Self = Self {
        inner: (SingleId::NIL, SingleId::NIL)
    };

    /// ```
    /// use caretta_id::{Id, DoubleId};
    /// use std::str::FromStr;
    ///
    /// assert_eq!(DoubleId::MAX, DoubleId::from_str("zzz-zzz").unwrap());
    /// assert_eq!(DoubleId::MAX, DoubleId::try_from(1291467968).unwrap());
    /// ```
    const MAX: Self = Self {
        inner: (SingleId::MAX, SingleId::MAX)
    };

    #[cfg(test)]
    fn is_valid(&self) -> bool {
        self.inner.0.is_valid() && self.inner.1.is_valid() && (u32::from(self.clone()) < Self::SIZE)
    }
}

impl Display for DoubleId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}-{}", self.inner.0, self.inner.1)
    }
}

impl FromStr for DoubleId {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self {
            inner: match s.len() {
                7 => {
                    let delimiter = s[3..4].chars().next().unwrap();
                    if is_delimiter(delimiter) {
                        Ok((SingleId::from_str(&s[0..3])?, SingleId::from_str(&s[4..7])?))
                    } else {
                        Err(Error::InvalidDelimiter {
                            found: vec![delimiter],
                            raw: s.to_string()
                        })
                    }
                }
                6 => {
                    Ok((SingleId::from_str(&s[0..3])?, SingleId::from_str(&s[3..6])?))
                }
                x => Err(Error::InvalidLength {
                    expected: (6, 7),
                    found: x,
                    raw: s.to_string()
                })
            }?
        })
    }
}

impl Distribution<DoubleId> for Standard {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> DoubleId {
        DoubleId {
            inner: (rng.r#gen(), rng.r#gen())
        }
    }
}

impl TryFrom<u32> for DoubleId {
    type Error = Error;

    fn try_from(value: u32) -> Result<Self, Self::Error> {
        if value < Self::SIZE {
            Ok(Self {
                inner: (
                    SingleId::try_from(u16::try_from(value / (SingleId::SIZE as u32)).unwrap())?,
                    SingleId::try_from(u16::try_from(value % (SingleId::SIZE as u32)).unwrap())?
                )
            })
        } else {
            Err(Error::OutsideOfRange {
                expected: Self::SIZE as usize,
                found: value as usize
            })
        }
    }
}

impl From<DoubleId> for u32 {
    fn from(value: DoubleId) -> Self {
        u32::from(u16::from(value.inner.0)) * u32::from(SingleId::SIZE) + u32::from(u16::from(value.inner.1))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn assert_random<R>(rand: &mut R)
    where
        R: Rng
    {
        let id: DoubleId = rand.r#gen();
        assert!(id.is_valid());
        assert_eq!(id, DoubleId::from_str(&id.to_string()).unwrap());
        assert_eq!(id, DoubleId::try_from(u32::from(id.clone())).unwrap())
    }
    #[test]
    fn random_x10() {
        let mut rng = rand::thread_rng();
        for _ in 0..10 {
            assert_random(&mut rng);
        }
    }
}
28
id/src/error.rs
Normal file

@ -0,0 +1,28 @@
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("expected under {expected}, found {found}")]
    OutsideOfRange {
        expected: usize,
        found: usize,
    },
    #[error("Invalid chunk: {0}")]
    InvalidChunk(String),
    #[error("Length of id expected {} or {} but found {found}: {raw}", .expected.0, .expected.1)]
    InvalidLength {
        expected: (u8, u8),
        found: usize,
        raw: String
    },
    #[error("Number of chunks expected {expected} but {found}: {raw}")]
    InvalidLengthOfChunks {
        expected: u8,
        found: usize,
        raw: String,
    },
    #[error("Delimiter expected '-' or '_' but '{found:?}' found: {raw}")]
    InvalidDelimiter {
        found: Vec<char>,
        raw: String,
    }
}
24
id/src/lib.rs
Normal file

@ -0,0 +1,24 @@
mod single;
mod double;
mod error;
mod triple;
mod utils;

use rand::Rng;
pub use single::*;
pub use double::*;
pub use triple::*;
pub use error::*;

const DOUBLE_ID_SIZE: u32 = (SingleId::SIZE as u32).pow(2);
const TRIPLE_ID_SIZE: u64 = (SingleId::SIZE as u64).pow(3);

pub trait Id {
    type SizeType;
    const NIL: Self;
    const MAX: Self;
    const SIZE: Self::SizeType;
    #[cfg(test)]
    fn is_valid(&self) -> bool;
}
212
id/src/single.rs
Normal file

@ -0,0 +1,212 @@
use std::{fmt::Display, str::FromStr};

use rand::{distributions::{uniform::UniformInt, Standard}, prelude::Distribution, Rng};

use crate::{error::Error, Id};

const CHARACTERS: &[u8; 33] = b"0123456789abcdefghjkmnpqrstuvwxyz";
const BASE: u16 = 33;
const SQUARED_BASE: u16 = BASE.pow(2);
const CUBED_BASE: u16 = BASE.pow(3);

fn char_to_u8(c: char) -> Option<u8> {
    Some(match c {
        '0' => 0,
        '1' => 1,
        '2' => 2,
        '3' => 3,
        '4' => 4,
        '5' => 5,
        '6' => 6,
        '7' => 7,
        '8' => 8,
        '9' => 9,
        'a' => 10,
        'b' => 11,
        'c' => 12,
        'd' => 13,
        'e' => 14,
        'f' => 15,
        'g' => 16,
        'h' => 17,
        'i' => 1,
        'j' => 18,
        'k' => 19,
        'l' => 1,
        'm' => 20,
        'n' => 21,
        'o' => 0,
        'p' => 22,
        'q' => 23,
        'r' => 24,
        's' => 25,
        't' => 26,
        'u' => 27,
        'v' => 28,
        'w' => 29,
        'x' => 30,
        'y' => 31,
        'z' => 32,
        'A' => 10,
        'B' => 11,
        'C' => 12,
        'D' => 13,
        'E' => 14,
        'F' => 15,
        'G' => 16,
        'H' => 17,
        'I' => 1,
        'J' => 18,
        'K' => 19,
        'L' => 1,
        'M' => 20,
        'N' => 21,
        'O' => 0,
        'P' => 22,
        'Q' => 23,
        'R' => 24,
        'S' => 25,
        'T' => 26,
        'U' => 27,
        'V' => 28,
        'W' => 29,
        'X' => 30,
        'Y' => 31,
        'Z' => 32,
        _ => return None
    })
}

fn str_to_u16(s: &str) -> Result<u16, Error> {
    if s.len() != 3 {
        return Err(Error::InvalidChunk(format!("Chunk '{}' is not 3 characters", s)))
    }
    let mut buf: [u16; 3] = [0; 3];
    for (i, c) in s.chars().enumerate() {
        buf[i] = BASE.pow((2 - i) as u32) * (char_to_u8(c).ok_or(Error::InvalidChunk(format!("Invalid char: {}", c)))? as u16);
    }

    Ok(buf.iter().sum())
}
fn u16_to_string(int: u16) -> Result<String, Error> {
    if int >= CUBED_BASE {
        return Err(Error::OutsideOfRange {
            expected: CUBED_BASE as usize,
            found: int as usize
        })
    }
    let first_char = char::from(CHARACTERS[usize::try_from(int / SQUARED_BASE).unwrap()]);
    let second_char = char::from(CHARACTERS[usize::try_from((int % SQUARED_BASE) / BASE).unwrap()]);
    let third_char = char::from(CHARACTERS[usize::try_from(int % BASE).unwrap()]);
    Ok(format!("{}{}{}", first_char, second_char, third_char))
}

#[derive(Clone, Debug, PartialEq)]
pub struct SingleId {
    inner: u16
}

impl Id for SingleId {
    type SizeType = u16;
    const SIZE: Self::SizeType = CUBED_BASE;

    /// ```
    /// use caretta_id::{SingleId, Id};
    /// use std::str::FromStr;
    ///
    /// assert_eq!(SingleId::NIL, SingleId::from_str("000").unwrap());
    /// assert_eq!(SingleId::NIL, SingleId::try_from(0).unwrap());
    /// ```
    const NIL: SingleId = SingleId {
        inner: 0
    };

    /// ```
    /// use caretta_id::{Id, SingleId};
    /// use std::str::FromStr;
    ///
    /// assert_eq!(SingleId::MAX, SingleId::from_str("zzz").unwrap());
    /// assert_eq!(SingleId::MAX, SingleId::try_from(35936).unwrap());
    /// ```
    const MAX: SingleId = SingleId {
        inner: Self::SIZE - 1
    };

    #[cfg(test)]
    fn is_valid(&self) -> bool {
        self.inner < Self::SIZE
    }
}

impl Display for SingleId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", u16_to_string(self.inner).unwrap())
    }
}

impl FromStr for SingleId {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self {
            inner: str_to_u16(s)?
        })
    }
}

impl Distribution<SingleId> for Standard {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> SingleId {
        SingleId {
            inner: rng.gen_range(0..SingleId::SIZE)
        }
    }
}

impl TryFrom<u16> for SingleId {
    type Error = Error;

    fn try_from(value: u16) -> Result<Self, Self::Error> {
        if value < Self::SIZE {
            Ok(Self { inner: value })
        } else {
            Err(Error::OutsideOfRange {
                expected: Self::SIZE as usize,
                found: value as usize
            })
        }
    }
}

impl From<SingleId> for u16 {
    fn from(value: SingleId) -> Self {
        value.inner
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn assert_random<R>(rand: &mut R)
    where
        R: Rng
    {
        let chunk: SingleId = rand.r#gen();
        assert!(chunk.is_valid());
        let s = chunk.to_string();
        assert_eq!(chunk, SingleId::from_str(&s).unwrap());
        let i = u16::from(chunk.clone());
        assert_eq!(chunk, SingleId::try_from(i).unwrap());
    }
    #[test]
    fn random_x10() {
        let mut rng = rand::thread_rng();
        for _ in 0..10 {
            assert_random(&mut rng);
        }
    }
}
145
id/src/triple.rs
Normal file

@ -0,0 +1,145 @@
use crate::{utils::is_delimiter, DoubleId, Error, SingleId};

use std::{fmt::Display, str::FromStr};

use rand::{distributions::Standard, prelude::Distribution, Rng};

use crate::Id;

#[derive(Debug, Clone, PartialEq)]
pub struct TripleId {
    inner: (SingleId, SingleId, SingleId)
}

impl Id for TripleId {
    type SizeType = u64;
    const SIZE: Self::SizeType = (SingleId::SIZE as u64).pow(3);
    /// ```
    /// use caretta_id::{Id, TripleId};
    /// use std::str::FromStr;
    ///
    /// assert_eq!(TripleId::NIL, TripleId::from_str("000-000-000").unwrap());
    /// assert_eq!(TripleId::NIL, TripleId::try_from(0).unwrap());
    /// ```
    const NIL: Self = Self {
        inner: (SingleId::NIL, SingleId::NIL, SingleId::NIL)
    };

    /// ```
    /// use caretta_id::{Id, TripleId};
    /// use std::str::FromStr;
    ///
    /// assert_eq!(TripleId::MAX, TripleId::from_str("zzz-zzz-zzz").unwrap());
    /// assert_eq!(TripleId::MAX, TripleId::try_from(46411484401952).unwrap());
    /// ```
    const MAX: Self = Self {
        inner: (SingleId::MAX, SingleId::MAX, SingleId::MAX)
    };

    #[cfg(test)]
    fn is_valid(&self) -> bool {
        self.inner.0.is_valid() && self.inner.1.is_valid() && self.inner.2.is_valid() && (u64::from(self.clone()) < Self::SIZE)
    }
}

impl Display for TripleId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}-{}-{}", self.inner.0, self.inner.1, self.inner.2)
    }
}

impl FromStr for TripleId {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self {
            inner: match s.len() {
                11 => {
                    let delimiter = [
                        s[3..4].chars().next().unwrap(),
                        s[7..8].chars().next().unwrap(),
                    ];
                    if is_delimiter(delimiter[0]) && is_delimiter(delimiter[1]) {
                        Ok((SingleId::from_str(&s[0..3])?, SingleId::from_str(&s[4..7])?, SingleId::from_str(&s[8..11])?))
                    } else {
                        Err(Error::InvalidDelimiter {
                            found: Vec::from(delimiter),
                            raw: s.to_string()
                        })
                    }
                }
                9 => {
                    Ok((SingleId::from_str(&s[0..3])?, SingleId::from_str(&s[3..6])?, SingleId::from_str(&s[6..9])?))
                }
                x => {
                    Err(Self::Err::InvalidLength {
                        expected: (9, 11),
                        found: x,
                        raw: s.to_string()
                    })
                }
            }?
        })
    }
}

impl Distribution<TripleId> for Standard {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> TripleId {
        TripleId {
            inner: (rng.r#gen(), rng.r#gen(), rng.r#gen())
        }
    }
}

impl TryFrom<u64> for TripleId {
    type Error = Error;

    fn try_from(value: u64) -> Result<Self, Self::Error> {
        if value < Self::SIZE {
            Ok(Self {
                inner: (
                    SingleId::try_from(u16::try_from(value / (DoubleId::SIZE as u64)).unwrap())?,
                    SingleId::try_from(u16::try_from((value % (DoubleId::SIZE as u64)) / (SingleId::SIZE as u64)).unwrap())?,
                    SingleId::try_from(u16::try_from(value % (SingleId::SIZE as u64)).unwrap())?
                )
            })
        } else {
            Err(Error::OutsideOfRange {
                expected: Self::SIZE as usize,
                found: value as usize
            })
        }
    }
}

impl From<TripleId> for u64 {
    fn from(value: TripleId) -> Self {
        (u16::from(value.inner.0) as u64) * (DoubleId::SIZE as u64)
            + (u16::from(value.inner.1) as u64) * (SingleId::SIZE as u64)
            + (u16::from(value.inner.2) as u64)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn assert_random<R>(rand: &mut R)
    where
        R: Rng
    {
        let id: TripleId = rand.r#gen();
        assert!(id.is_valid());
        assert_eq!(id, TripleId::from_str(&id.to_string()).unwrap());
        assert_eq!(id, TripleId::try_from(u64::from(id.clone())).unwrap());
    }
    #[test]
    fn random_x10() {
        let mut rng = rand::thread_rng();
        for _ in 0..10 {
            assert_random(&mut rng);
        }
    }
}
11
id/src/utils.rs
Normal file

@ -0,0 +1,11 @@
use std::str::FromStr;

use crate::SingleId;

pub fn is_delimiter(c: char) -> bool {
    match c {
        '-' | '_' => true,
        _ => false,
    }
}
@ -18,6 +18,5 @@ syn = { version = "2.0.104", features = ["full"] }
[dev-dependencies]
chrono.workspace = true
caretta-sync-core.workspace = true
sea-orm.workspace = true
tokio.workspace = true
uuid.workspace = true
@ -7,74 +7,6 @@ use quote::{format_ident, quote, ToTokens};
use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Expr, ExprTuple, Field, Fields, FieldsNamed, Ident};
use derive::*;

#[proc_macro_derive(SyncableModel, attributes(syncable))]
pub fn syncable_model(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let struct_name = input.ident;
    assert_eq!(format_ident!("{}", struct_name), "Model");
    let fields = extract_fields(&input.data);
    let id_snake = extract_unique_field_ident(&fields, "id");
    let id_camel = Ident::new(&id_snake.to_string().to_upper_camel_case(), Span::call_site());
    let timestamp_snake = extract_unique_field_ident(&fields, "timestamp");
    let timestamp_camel = Ident::new(&timestamp_snake.to_string().to_upper_camel_case(), Span::call_site());
    let author_id_snake = extract_unique_field_ident(&fields, "author_id");
    let author_id_camel = Ident::new(&author_id_snake.to_string().to_upper_camel_case(), Span::call_site());
    let skips_snake = extract_field_idents(&fields, "skip");
    let output = quote! {
        impl SyncableModel for #struct_name {
            type SyncableEntity = Entity;
            fn get_id(&self) -> Uuid {
                self.#id_snake
            }
            fn get_timestamp(&self) -> DateTimeUtc {
                self.#timestamp_snake
            }
            fn get_author_id(&self) -> Uuid {
                self.#author_id_snake
            }
        }
        impl SyncableEntity for Entity {
            type SyncableModel = Model;
            type SyncableActiveModel = ActiveModel;
            type SyncableColumn = Column;
        }

        impl SyncableActiveModel for ActiveModel {
            type SyncableEntity = Entity;
            fn get_id(&self) -> Option<Uuid> {
                self.#id_snake.try_as_ref().cloned()
            }
            fn get_timestamp(&self) -> Option<DateTimeUtc> {
                self.#timestamp_snake.try_as_ref().cloned()
            }
            fn get_author_id(&self) -> Option<Uuid> {
                self.#author_id_snake.try_as_ref().cloned()
            }
        }
        impl SyncableColumn for Column {
            fn is_id(&self) -> bool {
                matches!(self, Column::#id_camel)
            }
            fn is_timestamp(&self) -> bool {
                matches!(self, Column::#timestamp_camel)
            }
            fn is_author_id(&self) -> bool {
                matches!(self, Column::#author_id_camel)
            }
            fn should_synced(&self) -> bool {
                todo!()
            }
            fn timestamp_after(timestamp: DateTimeUtc) -> sea_orm::sea_query::expr::SimpleExpr {
                Column::#timestamp_camel.gte(timestamp)
            }
            fn author_id_eq(author_id: Uuid) -> sea_orm::sea_query::expr::SimpleExpr {
                Column::#author_id_camel.eq(author_id)
            }
        }
    };
    output.into()
}
fn extract_unique_field_ident<'a>(fields: &'a FieldsNamed, attribute_arg: &'static str) -> &'a Ident {
    let mut fields = extract_field_idents(fields, attribute_arg);
    if fields.len() == 1 {
@ -1,35 +0,0 @@
use chrono::Local;
use sea_orm::{
    prelude::*,
    entity::{
        *,
        prelude::*
    }
};
use caretta_sync_core::data::syncable::*;
use caretta_sync_macros::SyncableModel;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel, SyncableModel)]
#[sea_orm(table_name = "syncable")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    #[syncable(id)]
    pub id: Uuid,
    #[sea_orm(indexed)]
    #[syncable(timestamp)]
    pub created_at: DateTimeUtc,
    #[syncable(author_id)]
    pub created_by: Uuid,
}

#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
pub enum Relation {}

impl ActiveModelBehavior for ActiveModel {}

#[test]
fn test_columns() {
    assert!(Column::Id.is_id());
    assert!(Column::CreatedAt.is_timestamp());
    assert!(Column::CreatedBy.is_author_id());
}