#13 Implement peer list command

Parent: ab05bd10cd
Commit: 0fce680e1e
10 changed files with 63 additions and 15 deletions
@@ -39,6 +39,8 @@ tracing = "0.1.41"
 tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
 uuid.workspace = true
 prost-types = "0.14.1"
+sysinfo = "0.37.0"
+whoami = "1.6.1"
 
 [target.'cfg(target_os="android")'.dependencies]
 jni = "0.21.1"
@@ -120,7 +120,7 @@ impl PartialConfig {
     pub fn default_desktop(app_name: &'static str) -> Self {
         Self {
             p2p: Some(PartialP2pConfig::default()),
-            rpc: Some(PartialRpcConfig::default()),
+            rpc: Some(PartialRpcConfig::default(app_name)),
             storage: Some(PartialStorageConfig::default(app_name)),
         }
     }
@@ -30,10 +30,11 @@ pub struct PartialRpcConfig {
     pub socket_path: Option<PathBuf>,
 }
 
-impl Default for PartialRpcConfig {
-    fn default() -> Self {
+impl PartialRpcConfig {
+    pub fn default(app_name: &'static str) -> Self {
+        let username = whoami::username();
         Self{
-            socket_path: Some(PathBuf::from_str(DEFAULT_SOCKET_PATH).unwrap()),
+            socket_path: Some(std::env::temp_dir().join(username).join(String::from(app_name) + ".sock")),
         }
     }
 }
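With this change the default RPC socket path is no longer the fixed DEFAULT_SOCKET_PATH; it is derived from the OS temporary directory, the current username, and the application name. A minimal sketch of how that path resolves (the helper name is illustrative and not part of the diff; whoami::username() and std::env::temp_dir() are the calls used above):

// Illustrative helper showing how the new default socket path is built.
// For user "alice" and app_name "caretta" on Linux this typically
// resolves to /tmp/alice/caretta.sock.
fn default_socket_path(app_name: &str) -> std::path::PathBuf {
    let username = whoami::username();   // current OS user
    std::env::temp_dir()                 // e.g. /tmp on Linux
        .join(username)
        .join(String::from(app_name) + ".sock")
}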
@@ -1,5 +1,6 @@
 use std::path::{Path, PathBuf};
 
+use dirs::cache_dir;
 use sea_orm::{ConnectOptions, Database, DbErr, DatabaseConnection};
 use sea_orm_migration::MigratorTrait;
 use crate::{cache::migration::CacheMigrator, config::StorageConfig, error::Error};
@@ -80,15 +81,23 @@ impl GlobalDatabaseConnections {
         T: AsRef<StorageConfig>,
         U: MigratorTrait,
     {
+        let data_path = Self::get_data_file_path(&config);
+        if let Some(x) = data_path.parent() {
+            std::fs::create_dir_all(x).expect("Failed to create directory for data database");
+        }
+        let cache_path = Self::get_cache_file_path(&config);
+        if let Some(x) = cache_path.parent() {
+            std::fs::create_dir_all(x).expect("Failed to create directory for cache database");
+        }
         DatabaseConnections{
             data: Self::get_or_init_database_connection_unchecked(
                 &self.data,
-                Self::get_url_unchecked(Self::get_data_file_path(&config)),
+                Self::get_url_unchecked(data_path),
                 _migrator
             ).await,
             cache: Self::get_or_init_database_connection_unchecked(
                 &self.cache,
-                Self::get_url_unchecked(Self::get_cache_file_path(&config)),
+                Self::get_url_unchecked(cache_path),
                 CacheMigrator
             ).await,
         }
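The added lines ensure the parent directories of the data and cache database files exist before the connections are opened, so a fresh install does not fail on a missing directory. The same pattern in isolation (a sketch; the real code above uses expect rather than propagating the error):

// Sketch of the "create the parent directory before opening the file" pattern.
use std::path::Path;

fn ensure_parent_dir(db_file: &Path) -> std::io::Result<()> {
    if let Some(dir) = db_file.parent() {
        // create_dir_all is a no-op when the directory already exists
        std::fs::create_dir_all(dir)?;
    }
    Ok(())
}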
@@ -3,7 +3,7 @@ use std::{path::PathBuf, sync::LazyLock};
 use sea_orm::{sea_query::{FromValueTuple, IntoValueTuple, ValueType}, ActiveModelBehavior, ActiveModelTrait, ColumnTrait, Condition, DatabaseConnection, EntityTrait, IntoActiveModel, ModelTrait, PrimaryKeyToColumn, PrimaryKeyTrait, Value};
 use sea_orm::QueryFilter;
 use tempfile::TempDir;
-use crate::{ config::{Config, PartialConfig, PartialP2pConfig, PartialRpcConfig, StorageConfig}, message::Message};
+use crate::{ config::{Config, PartialConfig, PartialP2pConfig, PartialRpcConfig, RpcConfig, StorageConfig}, message::Message};
 
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
 
@@ -19,6 +19,8 @@ pub static TEST_CONFIG: LazyLock<Config> = LazyLock::new(|| {
             data_directory: data_dir,
             cache_directory: cache_dir,
         },
-        rpc: PartialRpcConfig::default().try_into().unwrap(),
+        rpc: RpcConfig{
+            socket_path: test_dir.join("socket.sock"),
+        },
     }
 });
@@ -1,5 +1,8 @@
 use clap::Args;
-use caretta_core::utils::runnable::Runnable;
+use caretta_core::{
+    utils::runnable::Runnable,
+    proto::*,
+};
 use crate::cli::ConfigArgs;
 
 #[derive(Debug, Args)]
@@ -10,6 +13,11 @@ pub struct PeerListCommandArgs{
 
 impl Runnable for PeerListCommandArgs {
     async fn run(self, app_name: &'static str) {
-        todo!()
+        let config = self.config.into_config(app_name).await;
+        let path = String::from("unix://") + config.rpc.socket_path.as_os_str().to_str().expect("Invalid string");
+        let mut client = caretta_core::proto::cached_peer_service_client::CachedPeerServiceClient::connect(path).await.expect("Unix socket should be accessible");
+        let request = tonic::Request::new(CachedPeerListRequest {});
+        let response = client.list(request).await.expect("Faild to request/response");
+        println!("{:?}", response);
     }
 }
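The command builds a unix:// URI from the configured socket path and hands it to the generated client's connect. For comparison, tonic's own Unix-socket example wires the client up with an explicit connector instead of a URI scheme; a rough sketch of that pattern follows (it assumes the tower and hyper-util crates, which are not part of this diff, and the exact wrapper types vary between tonic/hyper versions):

// Rough sketch (not from this commit): connecting a tonic client over a
// Unix domain socket with an explicit connector, as in tonic's uds example.
use hyper_util::rt::TokioIo;
use tokio::net::UnixStream;
use tonic::transport::{Channel, Endpoint, Uri};
use tower::service_fn;

async fn connect_uds(socket: std::path::PathBuf) -> Result<Channel, tonic::transport::Error> {
    // The URI authority is a placeholder; the connector below decides where to connect.
    Endpoint::try_from("http://[::]:50051")?
        .connect_with_connector(service_fn(move |_: Uri| {
            let socket = socket.clone();
            async move {
                Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(socket).await?))
            }
        }))
        .await
}

The resulting Channel can then be passed to the generated client's new constructor instead of calling connect with a URI string.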
@@ -10,3 +10,5 @@ repository.workspace = true
 caretta.path = "../.."
 libp2p.workspace = true
 tokio.workspace = true
+tokio-stream = { version = "0.1.17", features = ["net"] }
+tonic.workspace = true
@@ -1,5 +1,12 @@
-use caretta::{config::P2pConfig, server::ServerTrait};
+use caretta::{
+    config::P2pConfig,
+    proto::cached_peer_service_server::CachedPeerServiceServer,
+    server::ServerTrait,
+    rpc::service::cached_peer::CachedPeerService
+};
 use libp2p::{futures::StreamExt, noise, swarm::SwarmEvent, tcp, yamux};
+use tokio::net::UnixListener;
+use tokio_stream::wrappers::UnixListenerStream;
 pub struct Server{}
 
 impl ServerTrait for Server {
@@ -34,6 +41,21 @@ impl ServerTrait for Server {
 
     async fn serve_rpc<T>(config: &T) -> Result<(), caretta::error::Error>
     where T: AsRef<caretta::config::RpcConfig> {
-        todo!()
+        let path = config.as_ref().socket_path.clone();
+        if let Some(x) = path.parent() {
+            if !x.exists() {
+                std::fs::create_dir_all(x).expect("Failed to create directory for socket file!");
+            }
+        }
+        if path.exists() {
+            std::fs::remove_file(&path).expect("Failed to remove existing socket file!")
+        }
+        let uds = UnixListener::bind(path).unwrap();
+        let uds_stream = UnixListenerStream::new(uds);
+        tonic::transport::Server::builder()
+            .add_service(CachedPeerServiceServer::new(CachedPeerService::default()))
+            .serve_with_incoming(uds_stream)
+            .await.unwrap();
+        Ok(())
     }
 }
@@ -7,7 +7,7 @@ edition = "2021"
 
 [dependencies]
 clap.workspace = true
-caretta = { path = "../..", features = ["desktop"] }
+caretta = { path = "../..", features = ["desktop", "test"] }
 caretta-example-core.path = "../core"
 libp2p.workspace = true
 tokio.workspace = true
@@ -1,5 +1,6 @@
+use caretta_example_core::server::Server;
 use clap::Args;
-use caretta::{config::Config, error::Error, global::CONFIG, utils::runnable::Runnable};
+use caretta::{config::Config, data::migration::DataMigrator, global::{CONFIG, DATABASE_CONNECTIONS}, server::ServerTrait, utils::runnable::Runnable};
 use libp2p::{noise, ping, swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Swarm};
 
 use super::ConfigArgs;
@@ -12,6 +13,7 @@ pub struct ServerCommandArgs {
 impl Runnable for ServerCommandArgs {
     async fn run(self, app_name: &'static str) {
         let config = CONFIG.get_or_init::<Config>(self.config.into_config(app_name).await).await;
+        let _ = DATABASE_CONNECTIONS.get_or_init_unchecked(&config, DataMigrator).await;
+        Server::serve_all(config).await.unwrap();
     }
 }