Compare commits

...

8 commits

64 changed files with 1144 additions and 483 deletions

View file

@ -10,11 +10,13 @@ license = "MIT OR Apache-2.0"
repository = "https://forgejo.fireturlte.net/lazy-supplements"
[workspace.dependencies]
chrono = "0.4.41"
ciborium = "0.2.2"
clap = { version = "4.5.38", features = ["derive"] }
dioxus = { version = "0.6.0", features = [] }
lazy-supplements-core.path = "lazy-supplements-core"
libp2p = { version = "0.55.0", features = ["macros", "mdns", "noise", "ping", "tcp", "tokio", "yamux" ] }
sea-orm = { version = "1.1.11", features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros", "with-chrono", "with-uuid"] }
sea-orm-migration = { version = "1.1.0", features = ["runtime-tokio-rustls", "sqlx-postgres"] }
serde = { version = "1.0.219", features = ["derive"] }
thiserror = "2.0.12"

View file

@ -6,7 +6,9 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap.workspace = true
dioxus.workspace = true
lazy-supplements-desktop.path = "../../lazy-supplements-desktop"
lazy-supplements-examples-core.path = "../core"
[features]

View file

@ -0,0 +1,16 @@
use clap::{Parser, Subcommand};
use lazy_supplements_desktop::cli::*;
// Top-level CLI for the example binary; parses one of the `CliCommand`
// subcommands. (Plain `//` comments on purpose: clap turns `///` doc
// comments into --help text, which would change runtime output.)
#[derive(Debug, Parser)]
pub struct Cli {
    // The selected subcommand; dispatched by the caller.
    #[command(subcommand)]
    command: CliCommand
}
// Subcommands of the example CLI; argument structs come from
// `lazy_supplements_desktop::cli` (glob-imported above).
// NOTE(review): `LogCommandArgs` — another hunk in this change set defines
// `LogsCommandArgs` (plural). Confirm which spelling is intended; as
// written one of the two will not resolve.
#[derive(Debug, Subcommand)]
pub enum CliCommand {
    Config(ConfigCommandArgs),
    Device(DeviceCommandArgs),
    Log(LogCommandArgs),
    Server(ServerCommandArgs),
}

View file

View file

@ -1,3 +1,5 @@
mod cli;
mod ipc;
/// Entry point: hands control to Dioxus, which runs the shared example
/// UI component for the lifetime of the process.
fn main() {
    dioxus::launch(lazy_supplements_examples_core::ui::plain::App);
}

View file

View file

@ -8,18 +8,23 @@ repository.workspace = true
[features]
default = []
desktop = ["dep:clap"]
test = ["dep:tempfile"]
desktop = ["dep:clap", "macros"]
mobile = ["macros"]
macros = ["dep:lazy-supplements-macros"]
test = ["dep:tempfile", "macros"]
[dependencies]
base64 = "0.22.1"
chrono = "0.4.41"
chrono.workspace = true
chrono-tz = "0.10.3"
ciborium.workspace = true
clap = {workspace = true, optional = true}
futures = "0.3.31"
lazy-supplements-macros = { path = "../lazy-supplements-macros", optional = true }
libp2p.workspace = true
sea-orm = { version = "1.1.11", features = ["sqlx-sqlite", "runtime-tokio-native-tls", "macros", "with-chrono", "with-uuid"] }
libp2p-core = { version = "0.43.0", features = ["serde"] }
libp2p-identity = { version = "0.2.11", features = ["ed25519", "peerid", "rand", "serde"] }
sea-orm.workspace = true
sea-orm-migration.workspace = true
serde.workspace = true
tempfile = { version = "3.20.0", optional = true }

View file

@ -10,11 +10,11 @@ use serde::{Deserialize, Serialize};
use crate::data::value::{MultiaddrValue, PeerIdValue};
#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Deserialize, Serialize)]
#[sea_orm(table_name = "peer")]
pub struct Model {
#[sea_orm(primary_key)]
pub id: u32,
pub id: Uuid,
#[sea_orm(indexed)]
pub created_at: DateTimeUtc,
#[sea_orm(indexed)]

View file

@ -42,7 +42,7 @@ impl TableMigration for Peer {
Table::create()
.table(Self::Table)
.if_not_exists()
.col(pk_auto(Self::Id))
.col(pk_uuid(Self::Id))
.col(string_len(Self::PeerId, 255))
.col(timestamp(Self::CreatedAt))
.col(timestamp(Self::UpdatedAt))

View file

@ -2,4 +2,10 @@
pub enum ConfigError {
#[error("missing config: {0}")]
MissingConfig(String),
#[error("Io error: {0}")]
Io(#[from] std::io::Error),
#[error("Toml Deserialization Error")]
TomlDerialization(#[from] toml::de::Error),
#[error("Toml Serialization Error")]
TomlSerialization(#[from] toml::ser::Error),
}

View file

@ -3,32 +3,31 @@ mod storage;
mod p2p;
use std::path::Path;
use crate::error::Error;
use crate::{utils::{emptiable::Emptiable, mergeable::Mergeable}};
pub use error::ConfigError;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio::{fs::File, io::{AsyncReadExt, AsyncWriteExt}};
pub use storage::{StorageConfig, PartialStorageConfig};
pub use p2p::{P2pConfig, PartialP2pConfig};
pub trait PartialConfig: Serialize + Sized + DeserializeOwned
{
fn default() -> Self;
fn empty() -> Self;
fn merge(&mut self, other: Self);
pub trait Config: TryFrom<Self::PartialConfig>{
type PartialConfig: PartialConfig<Config = Self>;
}
pub trait PartialConfig: Emptiable + From<Self::Config> + Mergeable {
type Config: Config<PartialConfig = Self>;
}
pub trait BaseConfig: DeserializeOwned + Serialize {
fn new() -> Self;
fn from_toml(s: &str) -> Result<Self, toml::de::Error> {
toml::from_str(s)
}
fn into_toml(&self) -> Result<String, toml::ser::Error> {
toml::to_string(self)
}
fn is_empty(&self) -> bool;
}
pub trait ConfigRoot: DeserializeOwned + Serialize {
fn new() -> Self;
async fn read_or_create<T>(path: T) -> Result<Self, Error>
async fn read_or_create<T>(path: T) -> Result<Self, ConfigError>
where
T: AsRef<Path>
{
@ -37,7 +36,7 @@ pub trait ConfigRoot: DeserializeOwned + Serialize {
}
Self::read_from(&path).await
}
async fn read_from<T>(path:T) -> Result<Self, Error>
async fn read_from<T>(path:T) -> Result<Self, ConfigError>
where
T: AsRef<Path>
{
@ -47,7 +46,7 @@ pub trait ConfigRoot: DeserializeOwned + Serialize {
let config: Self = toml::from_str(&content)?;
Ok(config)
}
async fn write_to<T>(&self, path:T) -> Result<(), Error>
async fn write_to<T>(&self, path:T) -> Result<(), ConfigError>
where
T: AsRef<Path>
{
@ -67,7 +66,7 @@ pub trait ConfigRoot: DeserializeOwned + Serialize {
mod tests {
use serde::{Deserialize, Serialize};
use crate::tests::test_toml_serialize_deserialize;
use crate::{tests::test_toml_serialize_deserialize, utils::{emptiable::Emptiable, mergeable::Mergeable}};
use super::{p2p::{P2pConfig, PartialP2pConfig}, PartialConfig};
@ -77,13 +76,14 @@ mod tests {
p2p: Option<PartialP2pConfig>
}
impl PartialConfig for TestConfig {
impl Default for TestConfig {
fn default() -> Self {
Self {
p2p: Some(PartialP2pConfig::default()),
}
}
}
impl Emptiable for TestConfig {
fn empty() -> Self {
Self {
p2p: None,
@ -93,7 +93,8 @@ mod tests {
fn is_empty(&self) -> bool {
self.p2p.is_none()
}
}
impl Mergeable for TestConfig {
fn merge(&mut self, other: Self) {
if let Some(p2p) = other.p2p {
self.p2p = Some(p2p);

View file

@ -3,7 +3,8 @@ use std::{net::{IpAddr, Ipv4Addr}, ops, path::{Path, PathBuf}};
use base64::{prelude::BASE64_STANDARD, Engine};
#[cfg(feature="desktop")]
use clap::Args;
use libp2p::{identity::{self, DecodingError, Keypair}, noise, ping, tcp, yamux, Swarm};
use futures::StreamExt;
use libp2p::{identity::{self, DecodingError, Keypair}, noise, ping, swarm::SwarmEvent, tcp, yamux, Swarm};
use serde::{Deserialize, Serialize};
use tokio::{fs::File, io::{AsyncReadExt, AsyncWriteExt}};
use tracing_subscriber::EnvFilter;
@ -11,7 +12,7 @@ use tracing_subscriber::EnvFilter;
use crate::{
config::PartialConfig,
error::Error, p2p
error::Error, p2p, utils::{emptiable::Emptiable, mergeable::Mergeable}
};
static DEFAULT_P2P_LISTEN_IPS: &[IpAddr] = &[IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))];
@ -30,7 +31,7 @@ fn base64_to_keypair(base64: &str) -> Result<Keypair, Error> {
Ok(Keypair::from_protobuf_encoding(&vec)?)
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[derive(Clone, Debug, Deserialize, Serialize,)]
pub struct P2pConfig {
#[serde(with = "keypair_parser")]
pub secret: Keypair,
@ -39,7 +40,7 @@ pub struct P2pConfig {
}
impl P2pConfig {
pub async fn try_into_swarm (self) -> Result<Swarm<p2p::Behaviour>, Error> {
async fn try_into_swarm (self) -> Result<Swarm<p2p::Behaviour>, Error> {
let mut swarm = libp2p::SwarmBuilder::with_existing_identity(self.secret)
.with_tokio()
.with_tcp(
@ -52,6 +53,22 @@ impl P2pConfig {
swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
Ok(swarm)
}
pub async fn launch_swarm(self) -> Result<(), Error>{
let mut swarm = self.try_into_swarm().await?;
loop{
let swarm_event = swarm.select_next_some().await;
tokio::spawn(async move{
match swarm_event {
SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
SwarmEvent::Behaviour(event) => {
println!("{event:?}");
event.run().await;
},
_ => {}
}
});
}
}
}
impl TryFrom<PartialP2pConfig> for P2pConfig {
@ -96,44 +113,10 @@ pub struct PartialP2pConfig {
pub port: Option<u16>,
}
impl PartialP2pConfig {
pub fn with_new_secret(mut self) -> Self {
self.secret = Some(keypair_to_base64(&Keypair::generate_ed25519()));
self
}
pub async fn read_or_create<T>(path: T) -> Result<Self, Error>
where
T: AsRef<Path>
{
if !path.as_ref().exists() {
Self::empty().write_to(&path).await?;
}
Self::read_from(&path).await
}
pub async fn read_from<T>(path:T) -> Result<Self, Error>
where
T: AsRef<Path>
{
let mut file = File::open(path.as_ref()).await?;
let mut content = String::new();
file.read_to_string(&mut content).await?;
let config: Self = toml::from_str(&content)?;
Ok(config)
}
pub async fn write_to<T>(&self, path:T) -> Result<(), Error>
where
T: AsRef<Path>
{
if !path.as_ref().exists() {
if let Some(x) = path.as_ref().parent() {
std::fs::create_dir_all(x)?;
};
let _ = File::create(&path).await?;
}
let mut file = File::create(&path).await?;
file.write_all(toml::to_string(self)?.as_bytes()).await?;
Ok(())
}
}
impl From<P2pConfig> for PartialP2pConfig {
@ -146,29 +129,7 @@ impl From<P2pConfig> for PartialP2pConfig {
}
}
impl PartialConfig for PartialP2pConfig {
fn empty() -> Self {
Self {
secret: None,
listen_ips: None,
port: None,
}
}
fn is_empty(&self) -> bool {
self.secret.is_none() && self.listen_ips.is_none() && self.port.is_none()
}
fn merge(&mut self, another: Self) {
if let Some(x) = another.secret {
self.secret = Some(x);
};
if let Some(x) = another.listen_ips {
self.listen_ips = Some(x);
};
if let Some(x) = another.port {
self.port = Some(x);
};
}
impl Default for PartialP2pConfig {
fn default() -> Self {
Self {
secret: None,
@ -178,7 +139,33 @@ impl PartialConfig for PartialP2pConfig {
}
}
impl Emptiable for PartialP2pConfig {
fn empty() -> Self {
Self{
secret: None,
listen_ips: None,
port: None
}
}
fn is_empty(&self) -> bool {
self.secret.is_none() && self.listen_ips.is_none() && self.port.is_none()
}
}
impl Mergeable for PartialP2pConfig {
fn merge(&mut self, mut other: Self) {
if let Some(x) = other.secret.take() {
let _ = self.secret.insert(x);
};
if let Some(x) = other.listen_ips.take() {
let _ = self.listen_ips.insert(x);
};
if let Some(x) = other.port.take() {
let _ = self.port.insert(x);
};
}
}
#[cfg(test)]

View file

@ -5,7 +5,7 @@ use clap::Args;
#[cfg(any(test, feature="test"))]
use tempfile::tempdir;
use crate::{config::{ConfigError, PartialConfig}};
use crate::{config::{ConfigError, PartialConfig}, utils::{emptiable::Emptiable, mergeable::Mergeable}};
use libp2p::mdns::Config;
use serde::{Deserialize, Serialize};
@ -67,25 +67,25 @@ impl From<StorageConfig> for PartialStorageConfig {
}
}
impl PartialConfig for PartialStorageConfig {
impl Emptiable for PartialStorageConfig {
fn empty() -> Self {
Self{
Self {
data_directory: None,
cache_directory: None,
cache_directory: None
}
}
fn is_empty(&self) -> bool {
self.data_directory.is_none() && self.cache_directory.is_none()
}
fn default() -> Self {
todo!()
}
fn merge(&mut self, other: Self) {
if let Some(x) = other.data_directory {
self.data_directory = Some(x);
}
if let Some(x) = other.cache_directory {
self.cache_directory = Some(x);
}
}
impl Mergeable for PartialStorageConfig {
fn merge(&mut self, mut other: Self) {
if let Some(x) = other.data_directory.take() {
let _ = self.data_directory.insert(x);
};
if let Some(x) = other.cache_directory.take() {
let _ = self.cache_directory.insert(x);
};
}
}

View file

@ -1,11 +1,11 @@
mod trusted_peer;
mod trusted_node;
mod record_deletion;
pub use trusted_peer::{
ActiveModel as TrustedPeerActiveModel,
Column as TrustedPeerColumn,
Entity as TrustedPeerEntity,
Model as TrustedPeerModel,
pub use trusted_node::{
ActiveModel as TrustedNodeActiveModel,
Column as TrustedNodeColumn,
Entity as TrustedNodeEntity,
Model as TrustedNodeModel,
};
pub use record_deletion::{

View file

@ -4,15 +4,21 @@ use sea_orm::entity::{
prelude::*
};
use serde::{Deserialize, Serialize};
use crate::data::syncable::*;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[cfg_attr(feature="macros", derive(SyncableModel))]
#[sea_orm(table_name = "record_deletion")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
#[cfg_attr(feature="macros", syncable(id))]
pub id: Uuid,
#[sea_orm(indexed)]
#[cfg_attr(feature="macros", syncable(timestamp))]
pub created_at: DateTimeUtc,
#[cfg_attr(feature="macros", syncable(author_id))]
pub created_by: Uuid,
pub table_name: String,
pub record_id: Uuid,
}

View file

@ -9,7 +9,7 @@ use crate::data::value::PeerIdValue;
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)]
#[sea_orm(table_name = "trusted_peer")]
#[sea_orm(table_name = "trusted_node")]
pub struct Model {
#[sea_orm(primary_key, auto_increment = false)]
pub id: Uuid,

View file

@ -8,20 +8,20 @@ pub struct Migration;
#[async_trait::async_trait]
impl MigrationTrait for Migration {
async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
TrustedPeer::up(manager).await?;
TrustedNode::up(manager).await?;
RecordDeletion::up(manager).await?;
Ok(())
}
async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
TrustedPeer::down(manager).await?;
TrustedNode::down(manager).await?;
RecordDeletion::down(manager).await?;
Ok(())
}
}
#[derive(DeriveIden)]
enum TrustedPeer {
enum TrustedNode {
Table,
Id,
CreatedAt,
@ -33,7 +33,7 @@ enum TrustedPeer {
}
#[async_trait::async_trait]
impl TableMigration for TrustedPeer {
impl TableMigration for TrustedNode {
async fn up<'a>(manager: &'a SchemaManager<'a>) -> Result<(), DbErr> {
manager.create_table(
Table::create()

View file

@ -1,9 +1,11 @@
use sea_orm::{*, prelude::*, query::*};
use sea_orm::{prelude::*, query::*, sea_query::SimpleExpr, *};
#[cfg(feature="macros")]
pub use lazy_supplements_macros::SyncableModel;
pub trait SyncableModel: ModelTrait<Entity = Self::SyncableEntity> {
type SyncableEntity: SyncableEntity<SyncableModel = Self>;
fn get_updated_at(&self) -> DateTimeUtc;
fn get_uuid(&self) -> Uuid;
fn get_timestamp(&self) -> DateTimeUtc;
fn get_id(&self) -> Uuid;
fn get_author_id(&self) -> Uuid;
}
pub trait SyncableEntity: EntityTrait<
@ -15,9 +17,17 @@ pub trait SyncableEntity: EntityTrait<
type SyncableActiveModel: SyncableActiveModel<SyncableEntity= Self>;
type SyncableColumn: SyncableColumn;
async fn get_updated_after(date: DateTimeUtc, db: &DatabaseConnection) -> Result<Vec<<Self as EntityTrait>::Model>, SyncableError> {
async fn get_updated(from: DateTimeUtc, db: &DatabaseConnection) -> Result<Vec<<Self as EntityTrait>::Model>, SyncableError> {
let result: Vec<Self::SyncableModel> = <Self as EntityTrait>::find()
.filter(Self::SyncableColumn::updated_at().gte(date))
.filter(Self::SyncableColumn::timestamp_after(from))
.all(db)
.await.unwrap();
Ok(result)
}
async fn get_updated_by(author: Uuid, from: DateTimeUtc, db: &DatabaseConnection) -> Result<Vec<<Self as EntityTrait>::Model>, SyncableError> {
let result: Vec<Self::SyncableModel> = <Self as EntityTrait>::find()
.filter(Self::SyncableColumn::timestamp_after(from))
.filter(Self::SyncableColumn::author_id_eq(author))
.all(db)
.await.unwrap();
Ok(result)
@ -30,15 +40,18 @@ pub trait SyncableEntity: EntityTrait<
pub trait SyncableActiveModel: ActiveModelTrait<Entity = Self::SyncableEntity> {
type SyncableEntity: SyncableEntity<SyncableActiveModel = Self>;
fn get_uuid(&self) -> Option<Uuid>;
fn get_updated_at(&self) -> Option<DateTimeUtc>;
fn get_id(&self) -> Option<Uuid>;
fn get_timestamp(&self) -> Option<DateTimeUtc>;
fn get_author_id(&self) -> Option<Uuid>;
fn try_merge(&mut self, other: <Self::SyncableEntity as SyncableEntity>::SyncableModel) -> Result<(), SyncableError> {
if self.get_uuid().ok_or(SyncableError::MissingField("uuid"))? != other.get_uuid() {
if self.get_id().ok_or(SyncableError::MissingField("uuid"))? != other.get_id() {
return Err(SyncableError::MismatchUuid)
}
if self.get_updated_at().ok_or(SyncableError::MissingField("updated_at"))? < other.get_updated_at() {
if self.get_timestamp().ok_or(SyncableError::MissingField("updated_at"))? < other.get_timestamp() {
for column in <<<Self as ActiveModelTrait>::Entity as EntityTrait>::Column as Iterable>::iter() {
self.take(column).set_if_not_equals(other.get(column));
if column.should_synced(){
self.take(column).set_if_not_equals(other.get(column));
}
}
}
Ok(())
@ -47,10 +60,12 @@ pub trait SyncableActiveModel: ActiveModelTrait<Entity = Self::SyncableEntity> {
}
pub trait SyncableColumn: ColumnTrait {
fn is_uuid(&self) -> bool;
fn is_updated_at(&self) -> bool;
fn updated_at() -> Self;
fn should_not_sync(&self);
fn is_id(&self) -> bool;
fn is_timestamp(&self) -> bool;
fn should_synced(&self) -> bool;
fn timestamp_after(from: DateTimeUtc) -> SimpleExpr;
fn author_id_eq(author_id: Uuid) -> SimpleExpr;
fn is_author_id(&self) -> bool;
}

View file

@ -10,9 +10,9 @@ use sea_orm_migration::MigratorTrait;
use tokio::sync::{OnceCell, RwLock, RwLockReadGuard, RwLockWriteGuard};
mod peers;
pub use peers::PEERS;
pub use peers::*;
mod config;
pub use config::STORAGE_CONFIG;
pub use config::*;
mod database_connection;
pub use database_connection::*;
use uuid::{ContextV7, Timestamp, Uuid};

View file

@ -9,3 +9,4 @@ pub mod migration;
pub mod p2p;
#[cfg(any(test, feature="test"))]
pub mod tests;
pub mod utils;

View file

@ -1,15 +0,0 @@
use serde::{de::DeserializeOwned, Serialize};
pub trait Message: DeserializeOwned + Sized + Serialize {
fn into_writer<W: std::io::Write>(&self, writer: W) -> Result<(), ciborium::ser::Error<std::io::Error>> {
ciborium::into_writer(self, writer)
}
fn into_vec_u8(&self) -> Result<Vec<u8>, ciborium::ser::Error<std::io::Error>> {
let mut buf: Vec<u8> = Vec::new();
self.into_writer(&mut buf)?;
Ok(buf)
}
fn from_reader<R: std::io::Read>(reader: R) -> Result<Self, ciborium::de::Error<std::io::Error>> {
ciborium::from_reader(reader)
}
}

View file

@ -0,0 +1,52 @@
mod node;
use serde::{de::DeserializeOwned, Serialize};
use uuid::Uuid;
use crate::{utils::async_convert::{AsyncTryFrom, AsyncTryInto}, error::Error};
pub trait Message: DeserializeOwned + Sized + Serialize {
fn into_writer<W: std::io::Write>(&self, writer: W) -> Result<(), ciborium::ser::Error<std::io::Error>> {
ciborium::into_writer(self, writer)
}
fn into_vec_u8(&self) -> Result<Vec<u8>, ciborium::ser::Error<std::io::Error>> {
let mut buf: Vec<u8> = Vec::new();
self.into_writer(&mut buf)?;
Ok(buf)
}
fn from_reader<R: std::io::Read>(reader: R) -> Result<Self, ciborium::de::Error<std::io::Error>> {
ciborium::from_reader(reader)
}
}
pub trait Request<T>: Into<T> + From<T> + AsyncTryInto<Self::Response>
where T: Message {
type Response: Response<T, Request = Self>;
async fn send_p2p(self) -> Result<Self::Response, Error>;
}
pub trait Response<T>: Into<T> + From<T> + AsyncTryFrom<Self::Request>
where T: Message{
type Request: Request<T, Response = Self>;
async fn from_request_with_local(req: Self::Request) -> Result<Self,Error>;
async fn from_request_with_p2p(req: Self::Request) -> Result<Self, Error> {
todo!()
}
}
pub trait FromDatabase {
async fn from_storage();
}
pub trait P2pRequest<T>: Into<T> + From<T>
where T: Message {
type P2pResponse: P2pResponse<T, P2pRequest = Self>;
async fn send_p2p(&self) -> Result<Self::P2pResponse, crate::p2p::error::P2pError>{
todo!()
}
}
pub trait P2pResponse<T>: Into<T> + From<T> + AsyncTryFrom<(Self::P2pRequest)>
where T: Message {
type P2pRequest: P2pRequest<T, P2pResponse = Self>;
async fn try_from_p2p_request(source: Self::P2pRequest) -> Result<Self, crate::p2p::error::P2pError>;
}

View file

@ -0,0 +1,10 @@
use serde::{Deserialize, Serialize};
/// P2P message asking a peer for its list of trusted nodes.
/// Carries no payload.
#[derive(Debug, Deserialize, Serialize)]
pub struct ListTrustedNodeRequest;
/// Reply to [`ListTrustedNodeRequest`]: the responder's trusted-node rows.
#[derive(Debug, Deserialize, Serialize)]
pub struct ListTrustedNodeResponse {
    // NOTE(review): holds many models — consider the plural `nodes`.
    node: Vec<crate::data::entity::TrustedNodeModel>
}

View file

@ -0,0 +1,4 @@
/// Errors produced by the peer-to-peer layer.
///
/// Deliberately empty for now; variants will be added as fallible P2P
/// operations are implemented.
#[derive(Debug, thiserror::Error)]
pub enum P2pError {
}

View file

@ -1,3 +1,4 @@
pub mod error;
use libp2p::{ identity::Keypair, mdns, ping, swarm};
use sea_orm::{ActiveModelTrait, ActiveValue::Set, ColumnTrait, EntityTrait, QueryFilter};

View file

@ -0,0 +1,29 @@
/// Async counterpart of [`std::convert::From`]: builds `Self` from `T`,
/// awaiting as needed. Implement this rather than [`AsyncInto`]; the
/// blanket impl below derives the `Into` direction automatically.
pub trait AsyncFrom<T> {
    async fn async_from(source: T) -> Self;
}
/// Async counterpart of [`std::convert::Into`]. Do not implement this
/// directly; implement [`AsyncFrom`] and rely on the blanket impl.
pub trait AsyncInto<T> {
    async fn async_into(self) -> T;
}
// Blanket impl mirroring std's `impl<T, U: From<T>> Into<U> for T`:
// every `AsyncFrom` implementation automatically provides the reverse
// `AsyncInto` direction.
impl<T, U> AsyncInto<T> for U
where T: AsyncFrom<U> {
    async fn async_into(self) -> T {
        T::async_from(self).await
    }
}
/// Async counterpart of [`std::convert::TryFrom`]: fallible conversion
/// from `T`, awaiting as needed.
pub trait AsyncTryFrom<T>: Sized {
    /// Error type returned when the conversion fails.
    type Error: Sized;
    async fn async_try_from(source: T) -> Result<Self, Self::Error>;
}
/// Async counterpart of [`std::convert::TryInto`]. Do not implement this
/// directly; implement [`AsyncTryFrom`] and rely on the blanket impl.
pub trait AsyncTryInto<T>: Sized{
    /// Error type returned when the conversion fails.
    type Error: Sized;
    async fn async_try_into(self) -> Result<T, Self::Error>;
}
// Blanket impl: every `AsyncTryFrom` implementation provides the reverse
// `AsyncTryInto` direction with the same error type.
impl<T, U> AsyncTryInto<T> for U
where T: AsyncTryFrom<U> {
    type Error = <T as AsyncTryFrom<U>>::Error;
    async fn async_try_into(self) -> Result<T, Self::Error> {
        T::async_try_from(self).await
    }
}

View file

@ -0,0 +1,53 @@
use std::collections::{HashMap, HashSet};
#[cfg(feature="macros")]
pub use lazy_supplements_macros::Emptiable;
/// Types that have a canonical "empty" value and can report whether they
/// currently hold it. Used by the partial-config machinery to create and
/// detect blank configurations.
pub trait Emptiable{
    /// The canonical empty value for the type.
    fn empty() -> Self;
    /// True when `self` holds no data.
    fn is_empty(&self) -> bool;
}
impl<T> Emptiable for Vec<T> {
    /// An empty vector; allocates nothing.
    fn empty() -> Self {
        Vec::new()
    }

    /// True when the vector holds no elements.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<T> Emptiable for Option<T> {
    /// `None` is the empty value.
    fn empty() -> Self {
        None
    }

    /// True exactly when no value is present.
    fn is_empty(&self) -> bool {
        matches!(self, None)
    }
}
impl Emptiable for String {
    /// The zero-length string; allocates nothing.
    fn empty() -> Self {
        Self::default()
    }

    /// True when the string contains no bytes.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<T, U> Emptiable for HashMap<T, U> {
    /// A fresh map with no entries; allocates nothing until first insert.
    fn empty() -> Self {
        Default::default()
    }

    /// True when the map contains no key-value pairs.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<T> Emptiable for HashSet<T> {
    /// A fresh set with no members; allocates nothing until first insert.
    fn empty() -> Self {
        Default::default()
    }

    /// True when the set contains no members.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

View file

@ -0,0 +1,16 @@
#[cfg(feature="macros")]
pub use lazy_supplements_macros::Mergeable;
/// Types whose values can be layered: fields set in `other` override the
/// corresponding fields of `self`. Used to stack partial configurations
/// (defaults ← file ← CLI flags).
pub trait Mergeable: Sized {
    /// Folds `other` into `self`, preferring `other`'s populated fields.
    fn merge(&mut self, other: Self);
}
impl<T> Mergeable for Option<T> {
    /// Overwrites `self` with `other`'s payload when `other` is `Some`;
    /// a `None` `other` leaves `self` untouched.
    fn merge(&mut self, other: Self) {
        if let Some(value) = other {
            *self = Some(value);
        }
    }
}

View file

@ -0,0 +1,4 @@
pub mod async_convert;
pub mod emptiable;
pub mod mergeable;
pub mod runnable;

View file

@ -0,0 +1,6 @@
#[cfg(feature="macros")]
pub use lazy_supplements_macros::Runnable;
/// Something that can be executed to completion, consuming itself.
/// Implemented by CLI command argument structs; the `Runnable` derive
/// (feature "macros") presumably generates delegating impls — confirm
/// against the macro crate.
pub trait Runnable {
    async fn run(self);
}

View file

@ -16,10 +16,15 @@ clap.workspace = true
dirs = "6.0.0"
lazy-supplements-core = { workspace = true, features = ["desktop"] }
libp2p.workspace = true
prost = "0.14.1"
serde.workspace = true
thiserror.workspace = true
tokio.workspace = true
tonic = "0.14.0"
uuid.workspace = true
[dev-dependencies]
lazy-supplements-core = {workspace = true, features = ["test"]}
[build-dependencies]
tonic-prost-build = "0.14.0"

View file

@ -0,0 +1,4 @@
/// Build script: generates the Rust gRPC/protobuf bindings for the schema
/// under `proto/` via tonic. Fails the build if the schema is invalid.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    tonic_prost_build::compile_protos("proto/lazy_supplements.proto")?;
    Ok(())
}

View file

@ -0,0 +1,28 @@
// RPC schema for the lazy_supplements desktop IPC surface.
syntax = "proto3";

package lazy_supplements;

// Sort orders accepted by peer-listing requests.
enum PeerListOrderBy {
  CREATED_AT = 0;
  UPDATED_AT = 1;
  PEER_ID = 2;
}

// Read access to the locally cached peer table.
service CachedPeerService {
  rpc List(CachedPeerListRequest) returns (CachedPeerListResponse);
}

message CachedPeerListRequest {
  uint32 start = 1;              // presumably the offset of the first record — confirm
  uint32 count = 2;              // presumably the page size — confirm
  PeerListOrderBy order_by = 3;
}

message CachedPeer {
  string peer_id = 1;
  // NOTE(review): field name has a typo ("multi_addresss", triple s).
  // Renaming changes generated accessors but not the wire format (field
  // numbers do) — fix before this schema is published.
  repeated string multi_addresss = 2;
}

message CachedPeerListResponse {
  repeated CachedPeer peers = 1;
}

View file

@ -0,0 +1,35 @@
use std::{net::IpAddr, path::PathBuf};
use clap::Args;
use lazy_supplements_core::config::{BaseConfig, ConfigError};
use crate::config::{PartialP2pConfig, PartialStorageConfig};
use serde::{Deserialize, Serialize};
use crate::{
config::DesktopBaseConfig,
error::Error,
global::DEFAULT_CONFIG_FILE_PATH
};
// Shared `--config` CLI arguments plus a lazily-loaded file cache.
// (Plain `//` comments: clap renders `///` field docs into --help text.)
#[derive(Args, Clone, Debug)]
pub struct ConfigArgs {
    // Path to the config file (-c / --config); a default path is used
    // when omitted (see get_file_path_or_default).
    #[arg(short = 'c', long = "config")]
    pub file_path: Option<PathBuf>,
    // Cached parsed file contents; not a CLI argument (#[arg(skip)]),
    // populated on first call to get_or_read_file_content.
    #[arg(skip)]
    pub file_content: Option<DesktopBaseConfig>,
    // Config values supplied directly as CLI flags, flattened into this
    // command's argument list.
    #[command(flatten)]
    pub args: DesktopBaseConfig,
}
impl ConfigArgs {
    /// Returns the config file path given on the command line, or the
    /// process-wide default when `-c/--config` was not passed.
    pub fn get_file_path_or_default(&self) -> PathBuf {
        // `unwrap_or_else` so the default path is only cloned when needed.
        self.file_path
            .clone()
            .unwrap_or_else(|| (*DEFAULT_CONFIG_FILE_PATH).clone())
    }

    /// Returns the cached config file contents, reading and parsing the
    /// file on first use.
    ///
    /// Fix: the previous `get_or_insert(read(...).await.unwrap())`
    /// evaluated its argument eagerly, so the file was re-read on EVERY
    /// call even when the cache was already populated. Guarding on
    /// `is_none()` makes the cache actually cache.
    ///
    /// NOTE(review): still panics via `unwrap()` on a missing/malformed
    /// config file — consider a `Result`-returning variant.
    pub async fn get_or_read_file_content(&mut self) -> &mut DesktopBaseConfig {
        if self.file_content.is_none() {
            let content = DesktopBaseConfig::read_from(self.get_file_path_or_default())
                .await
                .unwrap();
            self.file_content = Some(content);
        }
        // SAFETY of expect: the branch above guarantees Some.
        self.file_content.as_mut().expect("populated above")
    }
}

View file

@ -0,0 +1,12 @@
use clap::Args;
use libp2p::{Multiaddr, PeerId};
use uuid::Uuid;
// Selects a single device. The clap group (multiple = false,
// required = true) makes the four selectors mutually exclusive and
// mandatory: the user supplies exactly one.
#[derive(Args, Clone, Debug)]
#[group(multiple = false, required = true)]
pub struct DeviceArgs {
    device_number: Option<u32>,   // presumably an index into a local device list — confirm
    device_id: Option<Uuid>,
    peer_id: Option<PeerId>,
    multiaddr: Option<Multiaddr>,
}

View file

@ -0,0 +1,7 @@
mod config;
mod device;
mod peer;
pub use config::ConfigArgs;
pub use device::DeviceArgs;
pub use peer::PeerArgs;

View file

@ -0,0 +1,10 @@
use clap::Args;
use libp2p::{Multiaddr, PeerId};
// Selects a single peer. The clap group (multiple = false,
// required = true) makes the three selectors mutually exclusive and
// mandatory: the user supplies exactly one.
#[derive(Args, Clone, Debug)]
#[group(multiple = false, required = true)]
pub struct PeerArgs {
    cache_number: Option<u32>,    // presumably an index into the cached-peer list — confirm
    peer_id: Option<PeerId>,
    multiaddr: Option<Multiaddr>,
}

View file

@ -1,38 +0,0 @@
use std::{net::IpAddr, path::PathBuf};
use clap::Args;
use lazy_supplements_core::config::{PartialConfig, PartialCoreConfig};
use serde::{Deserialize, Serialize};
use crate::{config::{desktop::PartialDesktopConfig, CoreConfig}, error::Error, global::{DEFAULT_CONFIG_FILE_PATH, DEFAULT_PARTIAL_CORE_CONFIG,}};
#[derive(Args, Clone, Debug)]
pub struct ConfigArgs {
#[arg(long)]
pub config: Option<PathBuf>,
#[command(flatten)]
pub core_config: PartialCoreConfig,
#[command(flatten)]
pub desktop_config: PartialDesktopConfig,
}
impl ConfigArgs {
pub fn get_config_path_or_default(&self) -> PathBuf {
if let Some(x) = self.config.as_ref() {
x.clone()
} else {
DEFAULT_CONFIG_FILE_PATH.to_path_buf()
}
}
pub async fn try_into_partial_core_config(self) -> Result<PartialCoreConfig, Error> {
let mut config = PartialCoreConfig::read_from(self.get_config_path_or_default()).await?;
config.merge(self.core_config.into());
Ok(config)
}
pub async fn try_into_core_config(self) -> Result<CoreConfig, Error> {
let mut config = DEFAULT_PARTIAL_CORE_CONFIG.clone();
config.merge(self.try_into_partial_core_config().await?);
config.try_into()
}
}

View file

@ -0,0 +1,24 @@
use clap::Args;
use crate::utils::runnable::Runnable;
use crate::cli::ConfigArgs;
use crate::cli::PeerArgs;
// Arguments for `device add`: trust a new peer, optionally authenticating
// with a passcode.
#[derive(Debug, Args)]
pub struct DeviceAddCommandArgs {
    #[command(flatten)]
    peer: PeerArgs,
    #[arg(short, long)]
    passcode: Option<String>,
    #[command(flatten)]
    config: ConfigArgs
}

impl Runnable for DeviceAddCommandArgs {
    // Stub: panics with `todo!` until implemented.
    async fn run(self) {
        todo!()
    }
}

View file

@ -0,0 +1,15 @@
use clap::Args;
use crate::utils::runnable::Runnable;
use crate::cli::{ConfigArgs, RunnableCommand};
// Arguments for `device list`: enumerate trusted devices.
// NOTE(review): the file's `use crate::cli::{ConfigArgs, RunnableCommand}`
// import of RunnableCommand appears unused here.
#[derive(Debug, Args)]
pub struct DeviceListCommandArgs{
    #[command(flatten)]
    config: ConfigArgs
}

impl Runnable for DeviceListCommandArgs {
    // Stub: panics with `todo!` until implemented.
    async fn run(self) {
        todo!()
    }
}

View file

@ -0,0 +1,33 @@
mod add;
mod list;
mod ping;
mod remove;
mod scan;
pub use add::DeviceAddCommandArgs;
use crate::utils::runnable::Runnable;
use libp2p::{Multiaddr, PeerId};
pub use list::DeviceListCommandArgs;
pub use ping::DevicePingCommandArgs;
pub use remove::DeviceRemoveCommandArgs;
pub use scan::DeviceScanCommandArgs;
use clap::{Args, Parser, Subcommand};
// `device` subcommand container. The derived `Runnable` presumably
// delegates `run` to the field marked #[runnable] — confirm against the
// macro crate.
#[derive(Debug, Args, Runnable)]
pub struct DeviceCommandArgs {
    #[command(subcommand)]
    #[runnable]
    pub command: DeviceSubcommand
}

// One variant per `device` action; the derived Runnable presumably
// dispatches to the active variant's args.
#[derive(Debug, Subcommand, Runnable)]
pub enum DeviceSubcommand {
    Add(DeviceAddCommandArgs),
    List(DeviceListCommandArgs),
    Ping(DevicePingCommandArgs),
    Remove(DeviceRemoveCommandArgs),
    Scan(DeviceScanCommandArgs),
}

View file

@ -0,0 +1,17 @@
use clap::Args;
use crate::utils::runnable::Runnable;
use crate::cli::{ConfigArgs, PeerArgs};
// Arguments for `device ping`: reachability check against one peer.
#[derive(Debug, Args)]
pub struct DevicePingCommandArgs{
    #[command(flatten)]
    peer: PeerArgs,
    #[command(flatten)]
    config: ConfigArgs
}

impl Runnable for DevicePingCommandArgs {
    // Stub: panics with `todo!` until implemented.
    async fn run(self) {
        todo!()
    }
}

View file

@ -0,0 +1,17 @@
use clap::Args;
use crate::utils::runnable::Runnable;
use crate::cli::{ConfigArgs, DeviceArgs, RunnableCommand};
// Arguments for `device remove`: drop a device from the trusted set.
// NOTE(review): the file imports RunnableCommand but does not use it.
#[derive(Debug, Args)]
pub struct DeviceRemoveCommandArgs{
    #[command(flatten)]
    device: DeviceArgs,
    #[command(flatten)]
    config: ConfigArgs
}

impl Runnable for DeviceRemoveCommandArgs {
    // Stub: panics with `todo!` until implemented.
    async fn run(self) {
        todo!()
    }
}

View file

@ -0,0 +1,15 @@
use clap::Args;
use crate::utils::runnable::Runnable;
use crate::cli::{ConfigArgs, RunnableCommand};
// Arguments for `device scan`: discover nearby devices.
// NOTE(review): the file imports RunnableCommand but does not use it.
#[derive(Debug, Args)]
pub struct DeviceScanCommandArgs{
    #[command(flatten)]
    config: ConfigArgs
}

impl Runnable for DeviceScanCommandArgs {
    // Stub: panics with `todo!` until implemented.
    async fn run(self) {
        todo!()
    }
}

View file

@ -0,0 +1,4 @@
// Arguments for the log subcommand; no options yet.
// NOTE(review): this 4-line new file shows no `use clap::Args;` — unless
// the unshown line provides it, `Args` will not resolve (or write
// `#[derive(clap::Args, Debug)]`). Also the subcommand enum elsewhere
// references `LogCommandArgs` (singular) — confirm the intended name.
#[derive(Args, Debug)]
pub struct LogsCommandArgs {
}

View file

@ -1,9 +1,13 @@
use std::path::PathBuf;
mod config;
mod node;
mod args;
mod device;
mod server;
pub use config::ConfigArgs;
pub use node::{ NodeArgs, NodeCommand, PeerArgs , ConsoleNodeArgs};
pub use server::ServerArgs;
pub use args::*;
pub use device::*;
pub use server::*;
pub trait RunnableCommand {
async fn run(self);
}

View file

@ -1,81 +0,0 @@
use std::{net::IpAddr, ops::Mul, path::PathBuf, str::FromStr};
use clap::{Args, Parser, Subcommand};
use libp2p::{
multiaddr::Protocol, noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr, PeerId
};
use crate::{cli::ServerArgs, error::Error};
use super::ConfigArgs;
#[derive(Debug, Args)]
pub struct NodeArgs {
#[command(subcommand)]
pub command: NodeCommand
}
#[derive(Debug, Parser)]
pub struct ConsoleNodeArgs {
#[command(flatten)]
pub args: NodeArgs,
}
impl NodeArgs {
pub async fn run(self) -> Result<(), Error> {
println!("{self:?}");
Ok(())
}
}
#[derive(Args, Debug)]
pub struct PeerArgs {
#[arg(value_parser = clap::value_parser!(PeerArg))]
pub peer: PeerArg,
}
#[derive(Clone, Debug)]
pub enum PeerArg {
Addr(Multiaddr),
Id(PeerId),
Number(u32),
}
impl FromStr for PeerArg {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Ok(x) = s.parse::<Multiaddr>() {
Ok(Self::Addr(x))
} else if let Ok(x) = s.parse::<PeerId>() {
Ok(Self::Id(x))
} else if let Ok(x) = s.parse::<u32>() {
Ok(Self::Number(x))
} else {
Err(format!("Invalid value: {s}").to_string())
}
}
}
#[derive(Args, Debug)]
pub struct NodeJoinArgs {
#[command(flatten)]
pub peer: PeerArgs,
pub pass: Option<String>,
}
#[derive(Debug, Subcommand)]
pub enum NodeCommand {
Add(PeerArgs),
Ping(PeerArgs),
Join(PeerArgs),
List,
Delete(PeerArgs),
}
impl PeerArgs {
pub async fn run(self) -> Result<(), Error> {
println!("{self:?}");
todo!()
}
}

View file

@ -1,18 +1,18 @@
use clap::Args;
use lazy_supplements_core::utils::runnable::Runnable;
use libp2p::{noise, ping, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Swarm};
use crate::{error::Error, global::GLOBAL};
use crate::{error::Error, global::P2P_CONFIG};
use super::ConfigArgs;
#[derive(Args, Debug)]
pub struct ServerArgs {
pub struct ServerCommandArgs {
#[command(flatten)]
config: ConfigArgs,
}
impl ServerArgs {
pub async fn start_server(self) -> Result<(), Error>{
let _ = crate::global::GLOBAL.get_or_init_core_config(self.config.try_into_core_config().await?).await;
GLOBAL.launch_swarm().await
impl Runnable for ServerCommandArgs {
async fn run(self) {
P2P_CONFIG.get_and_unwrap().clone().launch_swarm();
}
}

View file

@ -1,15 +1,48 @@
#[cfg(unix)]
pub mod unix;
pub mod rpc;
#[cfg(windows)]
pub mod windows;
pub mod desktop;
use clap::Args;
pub use lazy_supplements_core::config::*;
use lazy_supplements_core::utils::{emptiable::Emptiable, mergeable::Mergeable};
use serde::{Deserialize, Serialize};
#[cfg(unix)]
pub use unix::*;
pub use rpc::*;
#[cfg(windows)]
pub use windows::*;
pub use windows::*;
// Root configuration for desktop builds, combining the partial P2P,
// storage, and RPC sections. Doubles as the clap flag set (each section
// is flattened) and as the TOML (de)serialization target.
// (Plain `//` comments: clap renders `///` docs into --help text.)
#[derive(Args, Clone, Debug, Deserialize, Emptiable, Mergeable, Serialize)]
pub struct DesktopBaseConfig {
    #[command(flatten)]
    p2p: PartialP2pConfig,
    #[command(flatten)]
    storage: PartialStorageConfig,
    #[command(flatten)]
    rpc: PartialRpcConfig,
}
impl BaseConfig for DesktopBaseConfig {
    /// Builds a starter config: empty sections, but with a freshly
    /// generated P2P keypair and an OS-assigned free RPC port so the
    /// result is immediately usable.
    // NOTE(review): if `BaseConfig` also requires `is_empty`, this impl
    // is missing it — confirm against the trait definition in core.
    fn new() -> Self {
        Self {
            p2p : PartialP2pConfig::empty().with_new_secret(),
            storage: PartialStorageConfig::empty(),
            rpc: PartialRpcConfig::empty().with_unused_port(),
        }
    }
}
/// Extracts (a clone of) the P2P section.
///
/// Implemented as `From` rather than a hand-written `Into`: the std
/// blanket impl still provides `Into<PartialP2pConfig> for
/// &DesktopBaseConfig`, so all `.into()` call sites keep working.
impl From<&DesktopBaseConfig> for PartialP2pConfig {
    fn from(source: &DesktopBaseConfig) -> Self {
        source.p2p.clone()
    }
}
impl Into<PartialStorageConfig> for &DesktopBaseConfig {
fn into(self) -> PartialStorageConfig {
self.storage.clone()
}
}
impl Into<PartialRpcConfig> for &DesktopBaseConfig {
fn into(self) -> PartialRpcConfig {
self.rpc.clone()
}
}

View file

@ -0,0 +1,60 @@
use std::{net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener}, path::PathBuf};
use clap::Args;
use lazy_supplements_core::{config::PartialConfig, utils::{emptiable::Emptiable, mergeable::Mergeable}};
use libp2p::mdns::Config;
use serde::{Deserialize, Serialize};
use crate::config::error::ConfigError;
/// Fully-resolved RPC endpoint configuration (every field required).
pub struct RpcConfig {
    /// Interface the RPC server binds to.
    pub listen_address: IpAddr,
    /// TCP port the RPC server listens on.
    pub port: u16,
}
impl TryFrom<PartialRpcConfig> for RpcConfig {
    type Error = ConfigError;

    /// Resolves a partial config into a complete one.
    ///
    /// # Errors
    /// Returns `ConfigError::MissingConfig` naming the first absent field
    /// (`listen_address` is checked before `port`).
    fn try_from(config: PartialRpcConfig) -> Result<Self, Self::Error> {
        // `ok_or_else` so the error string is only allocated on the
        // missing-field path (plain `ok_or` allocated it eagerly on every call).
        Ok(Self {
            listen_address: config
                .listen_address
                .ok_or_else(|| ConfigError::MissingConfig("listen_address".to_string()))?,
            port: config
                .port
                .ok_or_else(|| ConfigError::MissingConfig("port".to_string()))?,
        })
    }
}
/// Partially-specified RPC config: every field optional so instances can be
/// merged from CLI flags, files and defaults before being resolved into
/// `RpcConfig` via `TryFrom`.
#[derive(Args, Clone, Debug, Deserialize, Emptiable, Mergeable, Serialize)]
pub struct PartialRpcConfig {
    pub listen_address: Option<IpAddr>,
    pub port: Option<u16>,
}
impl PartialRpcConfig {
pub fn with_unused_port(mut self) -> Self {
let listneer = if let Some(x) = self.listen_address {
TcpListener::bind(SocketAddr::new(x,0)).unwrap()
} else {
TcpListener::bind("127.0.0.1:0").unwrap()
};
self.port = Some(listneer.local_addr().unwrap().port());
self
}
}
impl Default for PartialRpcConfig {
    /// Loopback listen address with no port chosen yet (callers pick one
    /// via `with_unused_port` or by merging another source).
    fn default() -> Self {
        Self{
            listen_address: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)),
            port: None,
        }
    }
}
impl From<RpcConfig> for PartialRpcConfig {
    /// Lossless widening: every resolved field becomes `Some`.
    fn from(source: RpcConfig) -> Self {
        Self {
            listen_address: Some(source.listen_address),
            port: Some(source.port),
        }
    }
}

View file

@ -1,48 +0,0 @@
use std::path::PathBuf;
use clap::Args;
use lazy_supplements_core::config::PartialConfig;
use libp2p::mdns::Config;
use serde::{Deserialize, Serialize};
use crate::config::error::ConfigError;
// NOTE(review): this file is deleted by this diff (hunk `-1,48 +0,0`);
// the comments below document the removed code for the record.
/// Unix IPC configuration: path of the local domain socket.
pub struct UnixConfig {
    pub socket_path: PathBuf,
}
impl TryFrom<PartialUnixConfig> for UnixConfig {
    type Error = ConfigError;
    fn try_from(config: PartialUnixConfig) -> Result<Self, Self::Error> {
        Ok(Self{
            // Fails with MissingConfig when the socket path was never set.
            socket_path: config.socket_path.ok_or(ConfigError::MissingConfig("socket_path".to_string()))?
        })
    }
}
/// Optional-field counterpart of `UnixConfig`, mergeable across sources
/// (removed in this diff).
#[derive(Args, Clone, Debug, Deserialize, Serialize)]
pub struct PartialUnixConfig {
    pub socket_path: Option<PathBuf>,
}
impl From<UnixConfig> for PartialUnixConfig {
    /// Lossless widening of a resolved config.
    fn from(source: UnixConfig) -> Self {
        Self {
            socket_path: Some(source.socket_path)
        }
    }
}
impl PartialConfig for PartialUnixConfig {
    /// All-`None` instance.
    fn empty() -> Self {
        Self { socket_path: None }
    }
    // NOTE(review): never implemented — panics if called.
    fn default() -> Self {
        todo!()
    }
    /// Field-wise merge: `other` wins whenever it has a value.
    fn merge(&mut self, other: Self) {
        if let Some(x) = other.socket_path {
            self.socket_path = Some(x);
        };
    }
}

View file

@ -1,3 +0,0 @@
/// Windows IPC configuration: named-pipe identifier (removed in this diff).
pub struct WindowsConfig {
    pub pipe_name: String
}

View file

@ -1,6 +1,5 @@
use std::{path::PathBuf, sync::LazyLock};
use lazy_supplements_core::config::PartialCoreConfig;
pub use lazy_supplements_core::global::*;
pub static DEFAULT_DATA_DIR_PATH: LazyLock<PathBuf> = LazyLock::new(|| {
@ -28,123 +27,3 @@ pub static DEFAULT_CONFIG_FILE_PATH: LazyLock<PathBuf> = LazyLock::new(|| {
/// Default main-database file location under the default data directory.
pub static DEFAULT_DATABASE_FILE_PATH: LazyLock<PathBuf> = LazyLock::new(|| {
    DEFAULT_DATA_DIR_PATH.join(&*DEFAULT_DATABASE_FILE_NAME)
});
/// Removed default p2p core config: no secret, the default listen IPs, and
/// port 0 (the OS assigns an ephemeral port).
pub static DEFAULT_PARTIAL_CORE_CONFIG: LazyLock<PartialCoreConfig> = LazyLock::new(|| {
    PartialCoreConfig {
        secret: None,
        listen_ips: Some(DEFAULT_LISTEN_IPS.to_vec()),
        port: Some(0),
    }
});
/// Process-wide singletons (removed in this diff in favor of separate
/// statics such as `P2P_CONFIG`); each cell is initialized lazily.
pub struct Global {
    pub p2p_config: OnceCell<P2pConfig>,
    pub main_database: OnceCell<DatabaseConnection>,
    pub cache_database: OnceCell<DatabaseConnection>,
    /// Known peers keyed by libp2p peer id.
    pub peers: OnceCell<RwLock<HashMap<PeerId, Multiaddr>>>,
}
impl Global {
    /// Non-blocking read of the p2p config; `None` until initialized.
    pub fn get_p2p_config(&self) -> Option<&P2pConfig> {
        self.p2p_config.get()
    }
    /// Stores `config` on first call; later calls return the first value
    /// and silently drop their argument.
    pub async fn get_or_init_p2p_config(&self, config: P2pConfig) -> &P2pConfig {
        self.p2p_config.get_or_init(|| async {config}).await
    }
    /// Lazily creates the shared peer map.
    pub async fn get_or_init_peers(&self) -> &RwLock<HashMap<PeerId, Multiaddr>> {
        self.peers.get_or_init(|| async {
            RwLock::new(HashMap::new())
        }).await
    }
    /// Read lock on the peer map (initializing it if needed).
    pub async fn read_peers(&self) -> tokio::sync::RwLockReadGuard<'_, HashMap<PeerId, Multiaddr>>{
        self.get_or_init_peers().await.read().await
    }
    /// Write lock on the peer map (initializing it if needed).
    pub async fn write_peers(&self) -> tokio::sync::RwLockWriteGuard<'_, HashMap<PeerId, Multiaddr>>{
        self.get_or_init_peers().await.write().await
    }
    /// Drives the libp2p swarm forever: each event is moved into a spawned
    /// task so slow handlers don't stall swarm polling.
    /// NOTE(review): panics (`unwrap`) if the p2p config was never set.
    pub async fn launch_swarm(&self) -> Result<(), Error> {
        let mut swarm = self.get_p2p_config().unwrap().clone().try_into_swarm().await?;
        loop{
            let swarm_event = swarm.select_next_some().await;
            tokio::spawn(async move{
                match swarm_event {
                    SwarmEvent::NewListenAddr { address, .. } => println!("Listening on {address:?}"),
                    SwarmEvent::Behaviour(event) => {
                        println!("{event:?}");
                        event.run().await;
                    },
                    _ => {}
                }
            });
        }
    }
}
impl GlobalDatabase for Global {
    fn get_main_database(&self) -> Option<&DatabaseConnection> {
        self.main_database.get()
    }
    /// Opens (and migrates) the main sqlite database exactly once; the
    /// migrator argument only selects the type `U`, its value is ignored.
    async fn get_or_try_init_main_database<T, U>(&self, path: T, _: U) -> Result<&DatabaseConnection, Error>
    where
        T: AsRef<Path>,
        U: MigratorTrait,
    {
        // `mode=rwc` creates the file when missing.
        // NOTE(review): `to_str().unwrap()` panics on non-UTF-8 paths.
        let url = "sqlite://".to_string() + path.as_ref().to_str().unwrap() + "?mode=rwc";
        Ok(self.main_database.get_or_try_init(|| async {
            let db = Database::connect(&url).await?;
            U::up(&db, None).await?;
            Ok::<DatabaseConnection, DbErr>(db)
        }).await?)
    }
    fn get_cache_database(&self) -> Option<&DatabaseConnection> {
        self.cache_database.get()
    }
    /// Same as the main-database initializer, but for the cache database
    /// (the connect-and-migrate logic is duplicated verbatim).
    async fn get_or_try_init_cache_database<T, U>(&self, path: T, _: U) -> Result<&DatabaseConnection, Error>
    where
        T: AsRef<Path>,
        U: MigratorTrait,
    {
        let url = "sqlite://".to_string() + path.as_ref().to_str().unwrap() + "?mode=rwc";
        Ok(self.cache_database.get_or_try_init(|| async {
            let db = Database::connect(&url).await?;
            U::up(&db, None).await?;
            Ok::<DatabaseConnection, DbErr>(db)
        }).await?)
    }
}
#[cfg(test)]
pub use tests::{get_or_init_temporary_main_database, get_or_init_temporary_cache_database};
#[cfg(test)]
pub mod tests {
    use std::sync::LazyLock;
    use sea_orm_migration::MigratorTrait;
    use crate::{global::GLOBAL, cache::migration::CacheMigrator, data::migration::MainMigrator};
    use super::*;
    /// Shared temporary main DB for tests (panics on setup failure).
    pub async fn get_or_init_temporary_main_database() -> &'static DatabaseConnection {
        GLOBAL.get_or_try_init_temporary_main_database(MainMigrator).await.unwrap()
    }
    /// Shared temporary cache DB for tests (panics on setup failure).
    pub async fn get_or_init_temporary_cache_database() -> &'static DatabaseConnection {
        GLOBAL.get_or_try_init_temporary_cache_database(CacheMigrator).await.unwrap()
    }
    #[tokio::test]
    async fn connect_main_database () {
        let db = get_or_init_temporary_main_database().await;
        assert!(db.ping().await.is_ok());
    }
    #[tokio::test]
    async fn connect_cache_database () {
        let db = get_or_init_temporary_cache_database().await;
        assert!(db.ping().await.is_ok());
    }
}

View file

@ -1,4 +1,4 @@
mod response;
mod request;
pub use response::*;
pub use request::*;
pub use request::*;

View file

@ -2,6 +2,7 @@ pub mod cli;
pub mod config;
pub mod global;
pub mod ipc;
pub mod utils;
pub use lazy_supplements_core::{
cache,
data,

View file

@ -1,27 +0,0 @@
use clap::{Parser, Subcommand};
use lazy_supplements_desktop::{cli::{ConfigArgs, NodeArgs, NodeCommand, ServerArgs}, global::{Global, GLOBAL}, *};
/// Top-level CLI of the removed example binary: one subcommand plus
/// flattened config flags shared by all subcommands.
#[derive(Debug, Parser)]
struct Cli {
    #[command(subcommand)]
    command: Command,
    #[command(flatten)]
    pub config: ConfigArgs,
}
/// Removed: node management vs. long-running server mode.
#[derive(Debug, Subcommand)]
enum Command {
    Node(NodeArgs),
    Server(ServerArgs),
}
#[tokio::main]
async fn main() {
    let cli = Cli::parse();
    // Initialize the global core config before dispatching; unwraps on any
    // config error (acceptable at the binary's top level).
    let _ = GLOBAL.get_or_init_core_config(cli.config.try_into_core_config().await.unwrap()).await;
    match cli.command {
        Command::Node(x) => x.run().await.unwrap(),
        Command::Server(x) => x.start_server().await.unwrap(),
    }
}

View file

@ -0,0 +1 @@
pub use lazy_supplements_core::utils::*;

View file

@ -0,0 +1,23 @@
[package]
name = "lazy-supplements-macros"
edition.workspace = true
version.workspace = true
description.workspace = true
license.workspace = true
repository.workspace = true
[lib]
proc-macro = true
[dependencies]
heck = "0.5.0"
proc-macro2 = "1.0.95"
quote = "1.0.40"
syn = { version = "2.0.104", features = ["full"] }
[dev-dependencies]
chrono.workspace = true
lazy-supplements-core.workspace = true
sea-orm.workspace = true
tokio.workspace = true
uuid.workspace = true

View file

@ -0,0 +1,67 @@
use syn::{DataEnum, DataStruct, Fields, FieldsNamed, Ident, Type};
/// Returns the named-field list of a struct; panics on tuple/unit structs.
fn extract_fields_named_from_data_struct(data: &DataStruct) -> &FieldsNamed {
    match data.fields {
        Fields::Named(ref fields) => fields,
        _ => panic!("all fields must be named.")
    }
}
/// Idents of all named fields carrying the given attribute (matched by path
/// only, e.g. `#[runnable]`); fields without it are skipped.
fn extract_idents_from_fields_named_with_attribute<'a>(fields: &'a FieldsNamed, attribute: &'static str) -> Vec<&'a Ident>{
    fields.named.iter()
        .filter_map(|field| {
            field.attrs.iter()
                .find_map(|attr| {
                    if attr.path().is_ident(attribute) {
                        // Named fields always have an ident; `as_ref` borrows it.
                        field.ident.as_ref()
                    } else {
                        None
                    }
                })
        }).collect()
}
/// (ident, type) pairs of the named fields carrying `attribute`.
///
/// The explicit `'a` lifetime was removed: the function returns owned data,
/// so the lifetime was needless (clippy `needless_lifetimes`).
///
/// # Panics
/// Panics if the struct's fields are not named.
pub fn extract_idents_and_types_from_data_struct_with_attribute(data: &DataStruct, attribute: &'static str) -> Vec<(Ident, Type)> {
    let fields = extract_fields_named_from_data_struct(data);
    fields.named.iter().filter_map(|field| {
        field.attrs.iter()
            .find_map(|attr| {
                if attr.path().is_ident(attribute) {
                    // Named fields always have an ident, so unwrap is safe.
                    Some((field.ident.clone().unwrap(), field.ty.clone()))
                } else {
                    None
                }
            })
    }).collect()
}
/// (ident, type) pairs of every named field, in declaration order.
///
/// The explicit `'a` lifetime was removed: the function returns owned data,
/// so the lifetime was needless (clippy `needless_lifetimes`).
///
/// # Panics
/// Panics if the struct's fields are not named.
pub fn extract_idents_and_types_from_data_struct(data: &DataStruct) -> Vec<(Ident, Type)> {
    let fields = extract_fields_named_from_data_struct(data);
    fields.named.iter().map(|field| (field.ident.clone().unwrap(), field.ty.clone())).collect()
}
/// Consumes `source` and returns its sole element; panics with `msg` when
/// the vector holds anything other than exactly one element.
pub fn unwrap_vec_or_panic<T>(mut source: Vec<T>, msg: &'static str) -> T {
    match (source.pop(), source.is_empty()) {
        // `pop` yielded a value and nothing remains: length was exactly one.
        (Some(only), true) => only,
        _ => panic!("{}", msg),
    }
}
/// For each enum variant, returns `(variant ident, payload type)`.
///
/// The explicit `'a` lifetime was removed: the function returns owned data,
/// so the lifetime was needless (clippy `needless_lifetimes`).
///
/// # Panics
/// Panics if a variant's fields are not a single unnamed (tuple) field.
pub fn extract_idents_and_types_from_enum_struct(data: &DataEnum) -> Vec<(Ident, Type)> {
    data.variants.iter().map(|variant| {
        let mut field_types: Vec<Type> = match &variant.fields {
            Fields::Unnamed(fields_unnamed) => {
                fields_unnamed.unnamed.iter().map(|field| field.ty.clone()).collect()
            },
            _ => panic!("Fields of enum variant must be unnamed!")
        };
        if field_types.len() == 1 {
            (variant.ident.clone(), field_types.pop().unwrap())
        } else {
            panic!("Fields of enum variant must be single!")
        }
    }).collect()
}

View file

@ -0,0 +1,236 @@
mod derive;
use heck::ToUpperCamelCase;
use proc_macro::{self, TokenStream};
use proc_macro2::Span;
use quote::{format_ident, quote, ToTokens};
use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Expr, ExprTuple, Field, Fields, FieldsNamed, Ident};
use derive::*;
/// Derive macro wiring a sea-orm entity into the `Syncable*` traits.
///
/// Expects to be placed on sea-orm's `Model` struct, with exactly one field
/// each marked `#[syncable(id)]`, `#[syncable(timestamp)]` and
/// `#[syncable(author_id)]`; generates impls for `Model`, `Entity`,
/// `ActiveModel` and `Column`.
#[proc_macro_derive(SyncableModel, attributes(syncable))]
pub fn syncable_model(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let struct_name = input.ident;
    // Guard: the derive only makes sense on the sea-orm `Model` struct,
    // since the expansion hard-codes `Entity`/`ActiveModel`/`Column`.
    assert_eq!(format_ident!("{}", struct_name), "Model");
    let fields = extract_fields(&input.data);
    // snake_case ident names the model field; UpperCamelCase names the
    // matching sea-orm `Column` variant.
    let id_snake = extract_unique_field_ident(&fields, "id");
    let id_camel = Ident::new(&id_snake.to_string().to_upper_camel_case(), Span::call_site());
    let timestamp_snake = extract_unique_field_ident(&fields, "timestamp");
    let timestamp_camel = Ident::new(&timestamp_snake.to_string().to_upper_camel_case(), Span::call_site());
    let author_id_snake = extract_unique_field_ident(&fields, "author_id");
    let author_id_camel = Ident::new(&author_id_snake.to_string().to_upper_camel_case(), Span::call_site());
    // NOTE(review): collected but never used in the expansion below —
    // presumably intended for `should_synced`; confirm.
    let skips_snake = extract_field_idents(&fields, "skip");
    let output = quote!{
        impl SyncableModel for #struct_name {
            type SyncableEntity = Entity;
            fn get_id(&self) -> Uuid {
                self.#id_snake
            }
            fn get_timestamp(&self) -> DateTimeUtc {
                self.#timestamp_snake
            }
            fn get_author_id(&self) -> Uuid {
                self.#author_id_snake
            }
        }
        impl SyncableEntity for Entity {
            type SyncableModel = Model;
            type SyncableActiveModel = ActiveModel;
            type SyncableColumn = Column;
        }
        impl SyncableActiveModel for ActiveModel {
            type SyncableEntity = Entity;
            fn get_id(&self) -> Option<Uuid> {
                self.#id_snake.try_as_ref().cloned()
            }
            fn get_timestamp(&self) -> Option<DateTimeUtc> {
                self.#timestamp_snake.try_as_ref().cloned()
            }
            fn get_author_id(&self) -> Option<Uuid> {
                self.#author_id_snake.try_as_ref().cloned()
            }
        }
        impl SyncableColumn for Column {
            fn is_id(&self) -> bool {
                matches!(self, Column::#id_camel)
            }
            fn is_timestamp(&self) -> bool {
                matches!(self, Column::#timestamp_camel)
            }
            fn is_author_id(&self) -> bool {
                matches!(self, Column::#author_id_camel)
            }
            fn should_synced(&self) -> bool {
                todo!()
            }
            fn timestamp_after(timestamp: DateTimeUtc) -> sea_orm::sea_query::expr::SimpleExpr {
                Column::#timestamp_camel.gte(timestamp)
            }
            fn author_id_eq(author_id: Uuid) -> sea_orm::sea_query::expr::SimpleExpr {
                Column::#author_id_camel.eq(author_id)
            }
        }
    };
    output.into()
}
/// Finds the single field marked `#[syncable(<attribute_arg>)]`; panics when
/// the attribute is missing or appears on more than one field.
fn extract_unique_field_ident<'a>(fields: &'a FieldsNamed, attribute_arg: &'static str) -> &'a Ident {
    let mut matching = extract_field_idents(fields, attribute_arg);
    match matching.len() {
        1 => matching.pop().unwrap(),
        _ => panic!("Model must need one {} field attribute", attribute_arg),
    }
}
/// Idents of all fields whose `#[syncable(...)]` attribute mentions
/// `attribute_arg`, either alone (`#[syncable(id)]`) or inside a tuple
/// (`#[syncable(id, skip)]`).
fn extract_field_idents<'a>(fields: &'a FieldsNamed, attribute_arg: &'static str) -> Vec<&'a Ident>{
    fields.named.iter()
        .filter_map(|field| {
            field.attrs.iter()
                .find_map(|attr| {
                    if attr.path().is_ident("syncable") {
                        // Panics on unparsable attribute arguments.
                        let args: Expr = attr.parse_args().unwrap();
                        match args {
                            // `#[syncable(a, b)]` — scan each tuple element.
                            Expr::Tuple(arg_tupple) => {
                                arg_tupple.elems.iter()
                                    .find_map(|arg| {
                                        if let Expr::Path(arg_path) = arg {
                                            if arg_path.path.is_ident(attribute_arg) {
                                                Some(field.ident.as_ref().unwrap())
                                            } else {
                                                None
                                            }
                                        } else {
                                            None
                                        }
                                    })
                            },
                            // `#[syncable(a)]` — single path argument.
                            Expr::Path(arg_path) => {
                                if arg_path.path.is_ident(attribute_arg) {
                                    Some(field.ident.as_ref().unwrap())
                                } else {
                                    None
                                }
                            },
                            // Any other expression form is silently ignored.
                            _ => None
                        }
                    } else {
                        None
                    }
                })
        }).collect()
}
/// Unwraps the derive input down to its named fields; panics when the input
/// is not a struct, or is a struct without named fields.
fn extract_fields(data: &Data) -> &FieldsNamed {
    let Data::Struct(ref data_struct) = *data else {
        panic!("struct expected, but got other item.");
    };
    match data_struct.fields {
        Fields::Named(ref fields) => fields,
        _ => panic!("all fields must be named."),
    }
}
/// Derives `Emptiable` for a named-field struct: `empty()` builds each field
/// via its own `Emptiable::empty`, and `is_empty` is the `&&`-conjunction of
/// every field's `is_empty` check.
#[proc_macro_derive(Emptiable)]
pub fn emptiable(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let type_ident = input.ident;
    match input.data {
        Data::Struct(ref data) => {
            let field_idents = extract_idents_and_types_from_data_struct(data);
            // `<T as Emptiable>::is_empty(&self.field)` for each field.
            let is_empty_iter = field_idents.iter().map(|(ident, type_name)| {
                quote!{
                    <#type_name as Emptiable>::is_empty(&self.#ident)
                }
            });
            // `field: <T as Emptiable>::empty(),` for each field.
            let empty_iter = field_idents.iter().map(|(ident, type_name)| {
                quote!{
                    #ident: <#type_name as Emptiable>::empty(),
                }
            });
            quote!{
                impl Emptiable for #type_ident {
                    fn empty() -> Self {
                        Self {
                            #(#empty_iter)*
                        }
                    }
                    fn is_empty(&self) -> bool {
                        #(#is_empty_iter)&&*
                    }
                }
            }.into()
        }
        // Fixed garbled panic message ("struct or expected, but got other
        // type.") and aligned its wording with the `Mergeable` derive below.
        _ => panic!("struct expected, but got other type.")
    }
}
/// Derives `Mergeable` for a named-field struct by delegating to each
/// field's own `Mergeable::merge`, consuming `other` field by field.
#[proc_macro_derive(Mergeable)]
pub fn mergeable(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let type_ident = input.ident;
    match input.data {
        Data::Struct(ref data) => {
            let field_idents = extract_idents_and_types_from_data_struct(data);
            let merge_iter = field_idents.iter().map(|(ident, type_name)| {
                quote!{
                    <#type_name as Mergeable>::merge(&mut self.#ident, other.#ident);
                }
            });
            // `other` is moved field-by-field; it does not need a `mut`
            // binding (the previous `mut other` triggered an unused_mut
            // warning in every expansion).
            quote!{
                impl Mergeable for #type_ident {
                    fn merge(&mut self, other: Self){
                        #(#merge_iter)*
                    }
                }
            }.into()
        }
        _ => panic!("struct expected, but got other type.")
    }
}
/// Derives `Runnable`:
/// * struct — `run` delegates to the single field marked `#[runnable]`;
/// * enum — `run` matches each single-field tuple variant and runs its payload.
#[proc_macro_derive(Runnable, attributes(runnable))]
pub fn runnable(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let type_ident = input.ident;
    match input.data {
        Data::Struct(ref data) => {
            // Exactly one `#[runnable]` field is required. (`idents` is
            // consumed by `unwrap_vec_or_panic`, so no `mut` is needed —
            // the previous `let mut` triggered an unused_mut warning.)
            let idents = extract_idents_and_types_from_data_struct_with_attribute(data, "runnable");
            let (field_ident, field_type) = unwrap_vec_or_panic(idents, "Runnable struct must have one field with runnable attribute");
            quote!{
                impl Runnable for #type_ident {
                    async fn run(self) {
                        <#field_type as Runnable>::run(self.#field_ident).await
                    }
                }
            }.into()
        }
        Data::Enum(ref variants) => {
            // `variants` already is `&DataEnum`; the previous `&variants`
            // passed a needless `&&DataEnum` that only worked via coercion.
            let quote_vec = extract_idents_and_types_from_enum_struct(variants);
            let quote_iter = quote_vec.iter().map(|(variant_ident, variant_type)|{
                quote!{
                    Self::#variant_ident(x) => <#variant_type as Runnable>::run(x).await,
                }
            });
            quote!{
                impl Runnable for #type_ident {
                    async fn run(self) {
                        match self {
                            #(#quote_iter)*
                        }
                    }
                }
            }.into()
        },
        _ => panic!("struct or enum expected, but got union.")
    }
}

View file

@ -0,0 +1,26 @@
use std::collections::{HashMap, HashSet};
use lazy_supplements_core::utils::emptiable::Emptiable;
use lazy_supplements_macros::Emptiable;
/// Exercises the `Emptiable` derive across common emptiable field types.
#[derive(Debug, PartialEq, Emptiable)]
struct EmptiableStruct{
    vec: Vec<u8>,
    text: String,
    map: HashMap<u8, u8>,
    set: HashSet<u8>,
    opt: Option<u8>,
}
/// Verifies `Emptiable::empty` yields all-empty collections and `None`.
///
/// Fixed: the function was annotated `#[cfg(test)]`, which only gates
/// compilation and never registers it with the test harness, so it never
/// ran; `#[test]` actually executes it. Also removed the unused local
/// `use std::hash::Hash;`.
#[test]
fn test() {
    let empty = EmptiableStruct::empty();
    assert_eq!(&empty, &EmptiableStruct{
        vec: Vec::new(),
        text: String::new(),
        map: HashMap::new(),
        set: HashSet::new(),
        opt: None,
    })
}

View file

@ -0,0 +1,32 @@
use std::collections::{HashMap, HashSet};
use lazy_supplements_core::utils::mergeable::Mergeable;
use lazy_supplements_macros::Mergeable;
/// Single-`Option` fixture for the `Mergeable` derive.
#[derive(Clone, Debug, PartialEq, Mergeable)]
struct MergeableStruct {
    opt: Option<u8>,
}
/// Verifies `Option` merge semantics: the incoming value wins when `Some`,
/// otherwise the existing value is kept.
///
/// Fixed: the function was annotated `#[cfg(test)]`, which only gates
/// compilation and never registers it with the test harness, so it never
/// ran; `#[test]` actually executes it.
#[test]
fn test() {
    let zero = MergeableStruct{
        opt: Some(0),
    };
    let one = MergeableStruct {
        opt: Some(1),
    };
    let none = MergeableStruct{
        opt: None,
    };
    let mut zero_with_one = zero.clone();
    zero_with_one.merge(one.clone());
    let mut none_with_zero = none.clone();
    none_with_zero.merge(zero.clone());
    let mut zero_with_none = zero.clone();
    zero_with_none.merge(none.clone());
    // Incoming Some replaces existing Some …
    assert_eq!(zero_with_one.clone(), one.clone());
    // … and fills an existing None …
    assert_eq!(none_with_zero, zero.clone());
    // … while incoming None leaves the existing value untouched.
    assert_eq!(zero_with_none, zero.clone());
}

View file

@ -0,0 +1,32 @@
use lazy_supplements_core::utils::runnable::Runnable;
use lazy_supplements_macros::Runnable;
/// Minimal unit struct with a hand-written `Runnable` impl that just prints
/// a marker; serves as the leaf of the derive-delegation chain below.
struct RunnableStruct1;
impl Runnable for RunnableStruct1 {
    async fn run(self) {
        print!("Run {}", stringify!(RunnableStruct1::run()))
    }
}
/// Enum-dispatch case for the `Runnable` derive: one single-field tuple variant.
#[derive(Runnable)]
enum RunnableEnum {
    Struct1(RunnableStruct1),
}
/// Struct-delegation case: derived `run` forwards to the `#[runnable]` field.
#[derive(Runnable)]
struct RunnableStruct2 {
    #[runnable]
    runnable: RunnableEnum,
}
/// End-to-end: struct derive -> enum derive -> manual leaf impl; passes if
/// the whole delegation chain compiles and runs without panicking.
#[tokio::test]
async fn test() {
    let runnable = RunnableStruct2{
        runnable: RunnableEnum::Struct1(RunnableStruct1)
    };
    runnable.run().await;
}

View file

@ -0,0 +1,35 @@
use chrono::Local;
use sea_orm::{
prelude::*,
entity::{
*,
prelude::*
}
};
use lazy_supplements_core::data::syncable::*;
use lazy_supplements_macros::SyncableModel;
/// Fixture entity exercising the `SyncableModel` derive: exactly one field
/// each for the required `id`, `timestamp` and `author_id` roles.
#[derive(Clone, Debug, PartialEq, DeriveEntityModel, SyncableModel)]
#[sea_orm(table_name = "syncable")]
pub struct Model {
    #[sea_orm(primary_key, auto_increment = false)]
    #[syncable(id)]
    pub id: Uuid,
    #[sea_orm(indexed)]
    #[syncable(timestamp)]
    pub created_at: DateTimeUtc,
    #[syncable(author_id)]
    pub created_by: Uuid,
}
/// No relations; an (empty) enum is still required by `DeriveEntityModel`.
#[derive(Copy, Clone, Debug, DeriveRelation, EnumIter)]
pub enum Relation{}
// Default (no-op) active-model hooks.
impl ActiveModelBehavior for ActiveModel {}
#[test]
fn test_columns() {
    // The derive maps each `#[syncable(...)]` role from the snake_case field
    // onto the corresponding UpperCamelCase `Column` variant.
    assert!(Column::Id.is_id());
    assert!(Column::CreatedAt.is_timestamp());
    assert!(Column::CreatedBy.is_author_id());
}