diff --git a/Cargo.toml b/Cargo.toml index 0fdd42a..3b3562c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,8 +44,6 @@ clap = { version = "4.5.38", features = ["derive"] } caretta-sync-core.path = "core" futures = { version = "0.3.31", features = ["executor"] } libp2p = { version = "0.55.0", features = ["macros", "mdns", "noise", "ping", "tcp", "tokio", "yamux" ] } -sea-orm = { version = "1.1.11", features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros", "with-chrono", "with-uuid"] } -sea-orm-migration = { version = "1.1.0", features = ["runtime-tokio-rustls", "sqlx-postgres"] } serde = { version = "1.0.219", features = ["derive"] } thiserror = "2.0.12" tokio = { version = "1.45.0", features = ["macros", "rt", "rt-multi-thread"] } diff --git a/bevy/Cargo.toml b/bevy/Cargo.toml index a6f8fe4..f2d710b 100644 --- a/bevy/Cargo.toml +++ b/bevy/Cargo.toml @@ -10,6 +10,5 @@ repository.workspace = true bevy.workspace = true caretta-sync-core.workspace = true futures.workspace = true -sea-orm.workspace = true tokio.workspace = true tonic.workspace = true \ No newline at end of file diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 306b9a3..9dddd29 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -16,7 +16,6 @@ clap.workspace = true dirs = "6.0.0" caretta-sync-core = { workspace = true, features = ["cli"] } libp2p.workspace = true -sea-orm.workspace = true serde.workspace = true thiserror.workspace = true tokio.workspace = true diff --git a/core/Cargo.toml b/core/Cargo.toml index 286b54c..b3ad9da 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -24,8 +24,6 @@ libp2p-core = { version = "0.43.0", features = ["serde"] } libp2p-identity = { version = "0.2.11", features = ["ed25519", "peerid", "rand", "serde"] } prost = "0.14.1" prost-types = "0.14.1" -sea-orm.workspace = true -sea-orm-migration.workspace = true serde.workspace = true tempfile = { version = "3.20.0", optional = true } thiserror.workspace = true @@ -39,6 +37,7 @@ uuid.workspace = true url.workspace = true sysinfo = "0.37.0" whoami = "1.6.1" +rusqlite = { version = "0.37.0", features = ["bundled"] } [target.'cfg(target_os="android")'.dependencies] jni = "0.21.1" diff --git a/core/src/data/distributed/mod.rs b/core/src/data/distributed/mod.rs new file mode 100644 index 0000000..e69de29 diff --git a/core/src/data/entity/mod.rs b/core/src/data/entity/mod.rs deleted file mode 100644 index 0ae88ea..0000000 --- a/core/src/data/entity/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -mod trusted_node; -mod record_deletion; - -pub use trusted_node::{ - ActiveModel as TrustedNodeActiveModel, - Column as TrustedNodeColumn, - Entity as TrustedNodeEntity, - Model as TrustedNodeModel, -}; - -pub use record_deletion::{ - ActiveModel as RecordDeletionActiveModel, - Column as RecordDeletionColumn, - Entity as RecordDeletionEntity, - Model as RecordDeletionModel, -}; - -#[cfg(test)] -mod tests { - use crate::{data::{migration::DataMigrator, value::PeerIdValue}, global::{generate_uuid, DATABASE_CONNECTIONS}, tests::TEST_CONFIG}; - - use super::*; - - use libp2p::{identity, PeerId}; - use sea_orm::ActiveModelTrait; - - #[tokio::test] - async fn check_insert() { - let db = DATABASE_CONNECTIONS.get_or_init_unchecked(&*TEST_CONFIG, DataMigrator).await.cache; - - let node = TrustedNodeActiveModel::new(PeerId::random(), "test note".to_owned()).insert(db).await.unwrap(); - let _ = RecordDeletionActiveModel::new(node.id, "test_table".to_string(), generate_uuid()).insert(db).await.unwrap(); - } - -} \ No newline at end of file diff --git 
a/core/src/data/entity/record_deletion.rs b/core/src/data/entity/record_deletion.rs
deleted file mode 100644
index 97f947e..0000000
--- a/core/src/data/entity/record_deletion.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use chrono::Local;
-use sea_orm::{entity::{
-    prelude::*, *
-}, sea_query::table};
-use serde::{Deserialize, Serialize};
-use crate::data::syncable::*;
-
-
-#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
-#[cfg_attr(feature="macros", derive(SyncableModel))]
-#[sea_orm(table_name = "record_deletion")]
-pub struct Model {
-    #[sea_orm(primary_key, auto_increment = false)]
-    #[cfg_attr(feature="macros", syncable(id))]
-    pub id: Uuid,
-    #[sea_orm(indexed)]
-    #[cfg_attr(feature="macros", syncable(timestamp))]
-    pub created_at: DateTimeUtc,
-    #[cfg_attr(feature="macros", syncable(author_id))]
-    pub created_by: Uuid,
-    pub table_name: String,
-    pub record_id: Uuid,
-}
-
-#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
-pub enum Relation{}
-
-impl ActiveModelBehavior for ActiveModel {}
-
-impl ActiveModel {
-    pub fn new(node_id: Uuid, table_name: String, record_id: Uuid) -> Self {
-        let timestamp: DateTimeUtc = Local::now().to_utc();
-        Self{
-            id: Set(crate::global::generate_uuid()),
-            created_at: Set(timestamp),
-            created_by: Set(node_id),
-            table_name: Set(table_name),
-            record_id: Set(record_id),
-        }
-    }
-}
-
diff --git a/core/src/data/entity/trusted_node.rs b/core/src/data/entity/trusted_node.rs
deleted file mode 100644
index 5aaec12..0000000
--- a/core/src/data/entity/trusted_node.rs
+++ /dev/null
@@ -1,49 +0,0 @@
-use chrono::Local;
-use libp2p::PeerId;
-use sea_orm::entity::{
-    *,
-    prelude::*
-};
-use serde::{Deserialize, Serialize};
-
-use crate::data::value::PeerIdValue;
-
-
-#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
-#[sea_orm(table_name = "trusted_node")]
-pub struct Model {
-    #[sea_orm(primary_key, auto_increment = false)]
-    pub id: Uuid,
-    #[sea_orm(indexed)]
-    pub created_at: DateTimeUtc,
-    #[sea_orm(indexed)]
-    pub updated_at: DateTimeUtc,
-    #[sea_orm(indexed)]
-    pub synced_at: Option<DateTimeUtc>,
-    #[sea_orm(indexed)]
-    pub peer_id: PeerIdValue,
-    #[sea_orm(column_type = "Text")]
-    pub note: String,
-    pub is_prefered: bool,
-}
-
-#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
-pub enum Relation {}
-
-impl ActiveModelBehavior for ActiveModel {}
-
-impl ActiveModel {
-    pub fn new(peer_id: PeerId, note: String) -> Self {
-        let timestamp: DateTimeUtc = Local::now().to_utc();
-        Self{
-            id: Set(crate::global::generate_uuid()),
-            peer_id: Set(PeerIdValue::from(peer_id)),
-            created_at: Set(timestamp),
-            updated_at: Set(timestamp),
-            synced_at: Set(None),
-            is_prefered: Set(false),
-            note: Set(note),
-        }
-    }
-}
-
diff --git a/core/src/data/local/migration.rs b/core/src/data/local/migration.rs
new file mode 100644
index 0000000..adfc08a
--- /dev/null
+++ b/core/src/data/local/migration.rs
@@ -0,0 +1,48 @@
+use rusqlite::{Connection, Error};
+use tracing::{event, Level};
+
+
+/// Create the local database schema when `user_version` is still 0, then bump it to 1.
+pub fn migrate(con: &mut Connection) -> Result<(), Error> {
+    let version: u32 = con.pragma_query_value(None, "user_version", |row| row.get(0))?;
+    if version < 1 {
+        event!(Level::INFO, "Migrate local db to version 1");
+        let tx = con.transaction()?;
+        tx.execute(
+            "CREATE TABLE known_peer (
+                id INTEGER PRIMARY KEY,
+                peer_id TEXT UNIQUE NOT NULL,
+                created_at TEXT NOT NULL,
+                updated_at TEXT NOT NULL
+            )",
+            ()
+        )?;
+        tx.execute(
+            "CREATE TABLE known_address (
+                id INTEGER PRIMARY KEY,
+                known_peer_id INTEGER NOT NULL,
+                multiaddr TEXT UNIQUE NOT NULL,
+                created_at TEXT NOT NULL,
+                updated_at TEXT NOT NULL,
+                protocol TEXT NOT NULL,
+                FOREIGN KEY(known_peer_id) REFERENCES known_peer(id)
+            )",
+            ()
+        )?;
+        tx.execute(
+            "CREATE TABLE authorized_peer (
+                id INTEGER PRIMARY KEY,
+                known_peer_id INTEGER NOT NULL UNIQUE,
+                synced_at TEXT,
+                created_at TEXT NOT NULL,
+                updated_at TEXT NOT NULL,
+                FOREIGN KEY(known_peer_id) REFERENCES known_peer(id)
+            )",
+            ()
+        )?;
+        tx.pragma_update(None, "user_version", 1)?;
+        tx.commit()?;
+        event!(Level::INFO, "Migration done.");
+    }
+    Ok(())
+}
\ No newline at end of file
diff --git a/core/src/data/local/mod.rs b/core/src/data/local/mod.rs
new file mode 100644
index 0000000..61dbed5
--- /dev/null
+++ b/core/src/data/local/mod.rs
@@ -0,0 +1 @@
+pub mod migration;
\ No newline at end of file
diff --git a/core/src/data/migration/m20220101_000001_create_main_tables.rs b/core/src/data/migration/m20220101_000001_create_main_tables.rs
deleted file mode 100644
index 4f231f1..0000000
--- a/core/src/data/migration/m20220101_000001_create_main_tables.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-use sea_orm_migration::{prelude::*, schema::*};
-
-use crate::migration::TableMigration;
-
-#[derive(DeriveMigrationName)]
-pub struct Migration;
-
-#[async_trait::async_trait]
-impl MigrationTrait for Migration {
-    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
-        TrustedNode::up(manager).await?;
-        RecordDeletion::up(manager).await?;
-        Ok(())
-    }
-
-    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
-        TrustedNode::down(manager).await?;
-        RecordDeletion::down(manager).await?;
-        Ok(())
-    }
-}
-
-#[derive(DeriveIden)]
-enum TrustedNode {
-    Table,
-    Id,
-    CreatedAt,
-    UpdatedAt,
-    SyncedAt,
-    PeerId,
-    Note,
-    IsPrefered,
-}
-
-#[async_trait::async_trait]
-impl TableMigration for TrustedNode {
-    async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
-        manager.create_table(
-            Table::create()
-                .table(Self::Table)
-                .if_not_exists()
-                .col(pk_uuid(Self::Id))
-                .col(timestamp(Self::CreatedAt))
-                .col(timestamp(Self::UpdatedAt))
-                .col(timestamp_null(Self::SyncedAt))
-                .col(string_len(Self::PeerId, 255))
-                .col(text(Self::Note))
-                .col(boolean(Self::IsPrefered))
-                .to_owned()
-        ).await?;
-        Ok(())
-
-
-    }
-    async fn down<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr>{
-        manager.drop_table(Table::drop().table(Self::Table).to_owned()).await
-    }
-}
-
-#[derive(DeriveIden, DeriveMigrationName)]
-enum RecordDeletion {
-    Table,
-    Id,
-    CreatedAt,
-    CreatedBy,
-    TableName,
-    RecordId,
-}
-
-static FK_RECORD_DELETION_TRUSTED_NODE: &str = "fk_record_deletion_trusted_node";
-
-#[async_trait::async_trait]
-impl TableMigration for RecordDeletion {
-    async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
-        manager.create_table(
-            Table::create()
-                .table(Self::Table)
-                .if_not_exists()
-                .col(pk_uuid(Self::Id))
-                .col(timestamp_with_time_zone(Self::CreatedAt))
-                .col(uuid(Self::CreatedBy))
-                .foreign_key(ForeignKey::create()
-                    .name(FK_RECORD_DELETION_TRUSTED_NODE)
-                    .from(Self::Table,Self::CreatedBy)
-                    .to(TrustedNode::Table, TrustedNode::Id)
-                    .on_delete(ForeignKeyAction::Cascade)
-                    .on_update(ForeignKeyAction::Cascade)
-                )
-                .col(string(Self::TableName))
-                .col(uuid(Self::RecordId))
-                .to_owned()
-        ).await?;
-        Ok(())
-    }
-    async fn down<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr>{
-        manager.drop_table(Table::drop().table(Self::Table).to_owned()).await
-    }
-}
\ No newline at end of file
diff --git a/core/src/data/migration/mod.rs
b/core/src/data/migration/mod.rs deleted file mode 100644 index f2c5ccb..0000000 --- a/core/src/data/migration/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -use sea_orm_migration::prelude::*; - -pub mod m20220101_000001_create_main_tables; - -#[cfg(any(test, feature="test"))] -pub struct DataMigrator; - -#[cfg(any(test, feature="test"))] -#[async_trait::async_trait] -impl MigratorTrait for DataMigrator { - fn migrations() -> Vec> { - vec![Box::new(m20220101_000001_create_main_tables::Migration)] - } -} diff --git a/core/src/data/mod.rs b/core/src/data/mod.rs index c1779c9..81545c8 100644 --- a/core/src/data/mod.rs +++ b/core/src/data/mod.rs @@ -1,4 +1,2 @@ -pub mod entity; -pub mod migration; -pub mod syncable; -pub mod value; +pub mod distributed; +pub mod local; diff --git a/core/src/data/syncable.rs b/core/src/data/syncable.rs deleted file mode 100644 index 8096960..0000000 --- a/core/src/data/syncable.rs +++ /dev/null @@ -1,79 +0,0 @@ -use sea_orm::{prelude::*, query::*, sea_query::SimpleExpr, *}; -#[cfg(feature="macros")] -pub use caretta_sync_macros::SyncableModel; -pub trait SyncableModel: ModelTrait { - type SyncableEntity: SyncableEntity; - fn get_timestamp(&self) -> DateTimeUtc; - fn get_id(&self) -> Uuid; - fn get_author_id(&self) -> Uuid; -} - -pub trait SyncableEntity: EntityTrait< - Model = Self::SyncableModel, - ActiveModel = Self::SyncableActiveModel, - Column = Self::SyncableColumn, ->{ - type SyncableModel: SyncableModel + FromQueryResult; - type SyncableActiveModel: SyncableActiveModel; - type SyncableColumn: SyncableColumn; - - async fn get_updated(from: DateTimeUtc, db: &DatabaseConnection) -> Result::Model>, SyncableError> { - let result: Vec = ::find() - .filter(Self::SyncableColumn::timestamp_after(from)) - .all(db) - .await.unwrap(); - Ok(result) - } - async fn get_updated_by(author: Uuid, from: DateTimeUtc, db: &DatabaseConnection) -> Result::Model>, SyncableError> { - let result: Vec = ::find() - .filter(Self::SyncableColumn::timestamp_after(from)) - .filter(Self::SyncableColumn::author_id_eq(author)) - .all(db) - .await.unwrap(); - Ok(result) - } - fn apply_updated(models: Vec<::Model>, db: &DatabaseConnection) { - todo!() - } -} - -pub trait SyncableActiveModel: ActiveModelTrait { - - type SyncableEntity: SyncableEntity; - fn get_id(&self) -> Option; - fn get_timestamp(&self) -> Option; - fn get_author_id(&self) -> Option; - fn try_merge(&mut self, other: ::SyncableModel) -> Result<(), SyncableError> { - if self.get_id().ok_or(SyncableError::MissingField("uuid"))? != other.get_id() { - return Err(SyncableError::MismatchUuid) - } - if self.get_timestamp().ok_or(SyncableError::MissingField("updated_at"))? 
< other.get_timestamp() { - for column in <<::Entity as EntityTrait>::Column as Iterable>::iter() { - if column.should_synced(){ - self.take(column).set_if_not_equals(other.get(column)); - } - } - } - Ok(()) - } - -} - -pub trait SyncableColumn: ColumnTrait { - fn is_id(&self) -> bool; - fn is_timestamp(&self) -> bool; - fn should_synced(&self) -> bool; - fn timestamp_after(from: DateTimeUtc) -> SimpleExpr; - fn author_id_eq(author_id: Uuid) -> SimpleExpr; - fn is_author_id(&self) -> bool; -} - - -#[derive(Debug, thiserror::Error)] -pub enum SyncableError { - #[error("Invalid UUID")] - MismatchUuid, - #[error("mandatory field {0} is missing")] - MissingField(&'static str), - -} \ No newline at end of file diff --git a/core/src/data/value/mod.rs b/core/src/data/value/mod.rs deleted file mode 100644 index b32b175..0000000 --- a/core/src/data/value/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod multiaddr; -mod peer_id; - -pub use multiaddr::MultiaddrValue; -pub use peer_id::PeerIdValue; \ No newline at end of file diff --git a/core/src/data/value/multiaddr.rs b/core/src/data/value/multiaddr.rs deleted file mode 100644 index f4de622..0000000 --- a/core/src/data/value/multiaddr.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::str::FromStr; - -use libp2p::Multiaddr; -use sea_orm::{sea_query::ValueTypeErr, DbErr}; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -pub struct MultiaddrValue(Multiaddr); - -impl From for MultiaddrValue { - fn from(source: Multiaddr) -> Self { - Self(source) - } -} -impl From for Multiaddr { - fn from(source: MultiaddrValue) -> Self { - source.0 - } -} - -impl From for sea_orm::Value { - fn from(value: MultiaddrValue) -> Self { - Self::from(value.0.to_string()) - } -} - -impl sea_orm::TryGetable for MultiaddrValue { - fn try_get_by(res: &sea_orm::QueryResult, idx: I) - -> std::result::Result { - match ::try_get_by(res, idx){ - Ok(x) => match Multiaddr::from_str(&x) { - Ok(y) => Ok(Self(y)), - Err(_) => Err(DbErr::Type("Multiaddr".to_string()).into()), - }, - Err(x) => Err(x), - } - } -} - -impl sea_orm::sea_query::ValueType for MultiaddrValue { - fn try_from(v: sea_orm::Value) -> std::result::Result { - match ::try_from(v) { - Ok(x) => match Multiaddr::from_str(&x) { - Ok(y) => Ok(Self(y)), - Err(_) => Err(ValueTypeErr{}), - }, - Err(e) => Err(e) - } - } - - fn type_name() -> std::string::String { - stringify!(MultiaddrValue).to_owned() - } - - fn array_type() -> sea_orm::sea_query::ArrayType { - sea_orm::sea_query::ArrayType::String - } - - fn column_type() -> sea_orm::sea_query::ColumnType { - sea_orm::sea_query::ColumnType::Text - } -} - -impl sea_orm::sea_query::Nullable for MultiaddrValue { - fn null() -> sea_orm::Value { - ::null() - } -} \ No newline at end of file diff --git a/core/src/data/value/peer_id.rs b/core/src/data/value/peer_id.rs deleted file mode 100644 index ff106f3..0000000 --- a/core/src/data/value/peer_id.rs +++ /dev/null @@ -1,101 +0,0 @@ -use std::str::FromStr; - -use libp2p::PeerId; -use sea_orm::{sea_query::ValueTypeErr, DbErr}; -use serde::{Deserialize, Serialize}; - -use crate::error::Error; - -#[derive(Clone, Debug, PartialEq)] -pub struct PeerIdValue(PeerId); - -impl<'de> Deserialize<'de> for PeerIdValue { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de> { - Self::from_str(&String::deserialize(deserializer)?).or(Err(::custom("fail to parse PeerId"))) - - } -} - -impl Serialize for PeerIdValue { - fn serialize(&self, serializer: S) -> Result - where - S: 
serde::Serializer { - serializer.serialize_str(&self.0.to_string()) - } -} - -impl FromStr for PeerIdValue{ - type Err = libp2p::identity::ParseError; - - fn from_str(s: &str) -> Result { - Ok(Self(PeerId::from_str(s)?)) - } -} - -impl ToString for PeerIdValue { - fn to_string(&self) -> String { - self.0.to_string() - } -} - -impl From for PeerIdValue { - fn from(source: PeerId) -> Self { - Self(source) - } -} -impl From for PeerId { - fn from(source: PeerIdValue) -> Self { - source.0 - } -} - -impl From for sea_orm::Value { - fn from(value: PeerIdValue) -> Self { - Self::from(value.0.to_string()) - } -} - -impl sea_orm::TryGetable for PeerIdValue { - fn try_get_by(res: &sea_orm::QueryResult, idx: I) - -> std::result::Result { - match ::try_get_by(res, idx){ - Ok(x) => match PeerId::from_str(&x) { - Ok(y) => Ok(Self(y)), - Err(_) => Err(DbErr::Type("PeerId".to_string()).into()), - }, - Err(x) => Err(x), - } - } -} - -impl sea_orm::sea_query::ValueType for PeerIdValue { - fn try_from(v: sea_orm::Value) -> std::result::Result { - match ::try_from(v) { - Ok(x) => match PeerId::from_str(&x) { - Ok(y) => Ok(Self(y)), - Err(_) => Err(ValueTypeErr{}), - }, - Err(e) => Err(e) - } - } - - fn type_name() -> std::string::String { - stringify!(PeerIdValue).to_owned() - } - - fn array_type() -> sea_orm::sea_query::ArrayType { - sea_orm::sea_query::ArrayType::String - } - - fn column_type() -> sea_orm::sea_query::ColumnType { - sea_orm::sea_query::ColumnType::Text - } -} - -impl sea_orm::sea_query::Nullable for PeerIdValue { - fn null() -> sea_orm::Value { - ::null() - } -} diff --git a/core/src/message/mod.rs b/core/src/message/mod.rs deleted file mode 100644 index 38ced6a..0000000 --- a/core/src/message/mod.rs +++ /dev/null @@ -1,52 +0,0 @@ -mod node; -use serde::{de::DeserializeOwned, Serialize}; -use uuid::Uuid; - -use crate::{utils::async_convert::{AsyncTryFrom, AsyncTryInto}, error::Error}; - -pub trait Message: DeserializeOwned + Sized + Serialize { - fn into_writer(&self, writer: W) -> Result<(), ciborium::ser::Error> { - ciborium::into_writer(self, writer) - } - fn into_vec_u8(&self) -> Result, ciborium::ser::Error> { - let mut buf: Vec = Vec::new(); - self.into_writer(&mut buf)?; - Ok(buf) - } - fn from_reader(reader: R) -> Result> { - ciborium::from_reader(reader) - } -} - -pub trait Request: Into + From + AsyncTryInto -where T: Message { - type Response: Response; - async fn send_p2p(self) -> Result; - } - -pub trait Response: Into + From + AsyncTryFrom -where T: Message{ - type Request: Request; - async fn from_request_with_local(req: Self::Request) -> Result; - async fn from_request_with_p2p(req: Self::Request) -> Result { - todo!() - } -} - -pub trait FromDatabase { - async fn from_storage(); -} - - -pub trait P2pRequest: Into + From -where T: Message { - type P2pResponse: P2pResponse; - async fn send_p2p(&self) -> Result{ - todo!() - } -} -pub trait P2pResponse: Into + From + AsyncTryFrom<(Self::P2pRequest)> -where T: Message { - type P2pRequest: P2pRequest; - async fn try_from_p2p_request(source: Self::P2pRequest) -> Result; -} \ No newline at end of file diff --git a/core/src/message/node.rs b/core/src/message/node.rs deleted file mode 100644 index 174cbe7..0000000 --- a/core/src/message/node.rs +++ /dev/null @@ -1,10 +0,0 @@ -use serde::{Deserialize, Serialize}; - - -#[derive(Debug, Deserialize, Serialize)] -pub struct ListTrustedNodeRequest; - -#[derive(Debug, Deserialize, Serialize)] -pub struct ListTrustedNodeResponse { - node: Vec -} \ No newline at end of file diff --git 
a/macros/Cargo.toml b/macros/Cargo.toml index ae30dcc..b6e180a 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -18,6 +18,5 @@ syn = { version = "2.0.104", features = ["full"] } [dev-dependencies] chrono.workspace = true caretta-sync-core.workspace = true -sea-orm.workspace = true tokio.workspace = true uuid.workspace = true diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 75ac0e9..f41d4a4 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -7,74 +7,6 @@ use quote::{format_ident, quote, ToTokens}; use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Expr, ExprTuple, Field, Fields, FieldsNamed, Ident}; use derive::*; -#[proc_macro_derive(SyncableModel, attributes(syncable))] -pub fn syncable_model(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - let struct_name = input.ident; - assert_eq!(format_ident!("{}", struct_name), "Model"); - let fields = extract_fields(&input.data); - let id_snake = extract_unique_field_ident(&fields, "id"); - let id_camel = Ident::new(&id_snake.to_string().to_upper_camel_case(), Span::call_site()); - let timestamp_snake = extract_unique_field_ident(&fields, "timestamp"); - let timestamp_camel = Ident::new(×tamp_snake.to_string().to_upper_camel_case(), Span::call_site()); - let author_id_snake = extract_unique_field_ident(&fields, "author_id"); - let author_id_camel = Ident::new(&author_id_snake.to_string().to_upper_camel_case(), Span::call_site()); - let skips_snake = extract_field_idents(&fields, "skip"); - let output = quote!{ - impl SyncableModel for #struct_name { - type SyncableEntity = Entity; - fn get_id(&self) -> Uuid { - self.#id_snake - } - fn get_timestamp(&self) -> DateTimeUtc { - self.#timestamp_snake - } - fn get_author_id(&self) -> Uuid { - self.#author_id_snake - } - } - impl SyncableEntity for Entity { - type SyncableModel = Model; - type SyncableActiveModel = ActiveModel; - type SyncableColumn = Column; - } - - impl SyncableActiveModel for ActiveModel { - type SyncableEntity = Entity; - fn get_id(&self) -> Option { - self.#id_snake.try_as_ref().cloned() - } - fn get_timestamp(&self) -> Option { - self.#timestamp_snake.try_as_ref().cloned() - } - fn get_author_id(&self) -> Option { - self.#author_id_snake.try_as_ref().cloned() - } - } - impl SyncableColumn for Column { - fn is_id(&self) -> bool { - matches!(self, Column::#id_camel) - } - fn is_timestamp(&self) -> bool { - matches!(self, Column::#timestamp_camel) - } - fn is_author_id(&self) -> bool { - matches!(self, Column::#author_id_camel) - } - fn should_synced(&self) -> bool { - todo!() - } - fn timestamp_after(timestamp: DateTimeUtc) -> sea_orm::sea_query::expr::SimpleExpr { - Column::#timestamp_camel.gte(timestamp) - } - fn author_id_eq(author_id: Uuid) -> sea_orm::sea_query::expr::SimpleExpr { - Column::#author_id_camel.eq(author_id) - } - - } - }; - output.into() -} fn extract_unique_field_ident<'a>(fields: &'a FieldsNamed, attribute_arg: &'static str) -> &'a Ident { let mut fields = extract_field_idents(fields, attribute_arg); if fields.len() == 1 { diff --git a/macros/tests/derive_syncable.rs b/macros/tests/derive_syncable.rs deleted file mode 100644 index 1278081..0000000 --- a/macros/tests/derive_syncable.rs +++ /dev/null @@ -1,35 +0,0 @@ -use chrono::Local; -use sea_orm::{ - prelude::*, - entity::{ - *, - prelude::* - } -}; -use caretta_sync_core::data::syncable::*; -use caretta_sync_macros::SyncableModel; - -#[derive(Clone, Debug, PartialEq, DeriveEntityModel, SyncableModel)] -#[sea_orm(table_name = 
"syncable")] -pub struct Model { - #[sea_orm(primary_key, auto_increment = false)] - #[syncable(id)] - pub id: Uuid, - #[sea_orm(indexed)] - #[syncable(timestamp)] - pub created_at: DateTimeUtc, - #[syncable(author_id)] - pub created_by: Uuid, -} - -#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)] -pub enum Relation{} - -impl ActiveModelBehavior for ActiveModel {} - -#[test] -fn test_columns() { - assert!(Column::Id.is_id()); - assert!(Column::CreatedAt.is_timestamp()); - assert!(Column::CreatedBy.is_author_id()); -} \ No newline at end of file