Add SeaORM Migration

fluo10 2025-04-28 14:56:28 +09:00
parent 699a36d7ff
commit 0de65e6350
11 changed files with 1155 additions and 58 deletions

.gitignore vendored

@@ -9,9 +9,5 @@ target/
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
# RustRover
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.env
db

Cargo.lock generated

File diff suppressed because it is too large

Cargo.toml

@@ -3,6 +3,7 @@ members = ["dpts-*"]
[workspace.dependencies]
dpts-core = {path = "dpts-core"}
dpts-migration = {path = "dpts-migration"}
chrono = "0.4"
clap = "4.5"
dotenv = "0.15.0"

dpts-core/Cargo.toml

@@ -6,14 +6,17 @@ edition.workspace = true
repository.workspace = true
[dependencies]
dpts-migration = { workspace = true }
anyhow = "1.0"
async-graphql = "7.0"
axum = "0.8"
chrono = {workspace = true}
clap = {workspace = true}
dotenv = {workspace = true}
log = "0.4.27"
serde = { version = "1.0", features = ["derive"] }
thiserror = "2.0"
tokio = "1.44.2"
[dependencies.sea-orm]
version = "1.1"

dpts-core/src/lib.rs

@@ -17,4 +17,5 @@ mod tests {
let result = add(2, 2);
assert_eq!(result, 4);
}
}
}

dpts-core/tests/db.rs Normal file

@@ -0,0 +1,21 @@
use core::time::Duration;
use sea_orm::{ConnectOptions, Database};
use dpts_migration::{Migrator, MigratorTrait};

#[tokio::test]
async fn main() {
    let mut opt = ConnectOptions::new("sqlite::memory:");
    opt.max_connections(100)
        .min_connections(5)
        .connect_timeout(Duration::from_secs(8))
        .acquire_timeout(Duration::from_secs(8))
        .idle_timeout(Duration::from_secs(8))
        .max_lifetime(Duration::from_secs(8))
        .sqlx_logging(true)
        .sqlx_logging_level(log::LevelFilter::Info);
        //.set_schema_search_path("my_schema"); // Setting default PostgreSQL schema
    let db = Database::connect(opt).await.unwrap();
    Migrator::fresh(&db).await.unwrap();
    Migrator::reset(&db).await.unwrap();
    db.close().await.unwrap();
}
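
The test above only exercises `Migrator::fresh` and `Migrator::reset` against an in-memory SQLite database. As a rough sketch, not part of this commit, of how dpts-core could reuse the same migrator at startup: load `DATABASE_URL` from the `.env` file (now git-ignored) with `dotenv` and apply any pending migrations. The `connect_and_migrate` helper name is an assumption.

```rust
// Sketch only (not in this commit): connect via DATABASE_URL and apply
// pending migrations with the same Migrator used by the test above.
use sea_orm::{ConnectOptions, Database, DatabaseConnection, DbErr};
use dpts_migration::{Migrator, MigratorTrait};

pub async fn connect_and_migrate() -> Result<DatabaseConnection, DbErr> {
    // dotenv is already a dpts-core dependency; this makes .env values visible.
    dotenv::dotenv().ok();
    let url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    let db = Database::connect(ConnectOptions::new(url)).await?;
    // None = no step limit: run every migration that has not been applied yet.
    Migrator::up(&db, None).await?;
    Ok(db)
}
```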

dpts-migration/Cargo.toml Normal file

@@ -0,0 +1,20 @@
[package]
name = "dpts-migration"
version = "0.1.0"
edition = "2021"
publish = false

[lib]
name = "dpts_migration"
path = "src/lib.rs"

[dependencies]
async-std = { version = "1", features = ["attributes", "tokio1"] }

[dependencies.sea-orm-migration]
version = "1.1.0"
features = [
    "runtime-tokio-rustls",
    "sqlx-postgres",
    "sqlx-sqlite",
]

dpts-migration/README.md Normal file

@@ -0,0 +1,41 @@
# Running Migrator CLI
- Generate a new migration file
```sh
cargo run -- generate MIGRATION_NAME
```
- Apply all pending migrations
```sh
cargo run
```
```sh
cargo run -- up
```
- Apply first 10 pending migrations
```sh
cargo run -- up -n 10
```
- Rollback last applied migrations
```sh
cargo run -- down
```
- Rollback last 10 applied migrations
```sh
cargo run -- down -n 10
```
- Drop all tables from the database, then reapply all migrations
```sh
cargo run -- fresh
```
- Rollback all applied migrations, then reapply all migrations
```sh
cargo run -- refresh
```
- Rollback all applied migrations
```sh
cargo run -- reset
```
- Check the status of all migrations
```sh
cargo run -- status
```
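
Each subcommand reads the target database from the `DATABASE_URL` environment variable, and each maps onto a `MigratorTrait` method, so the same operations can be run from Rust. A minimal sketch, assuming `db` is an already-open `sea_orm::DatabaseConnection`:

```rust
use dpts_migration::{Migrator, MigratorTrait};
use sea_orm::{DatabaseConnection, DbErr};

// Programmatic equivalents of the CLI subcommands listed above.
async fn run_examples(db: &DatabaseConnection) -> Result<(), DbErr> {
    Migrator::up(db, None).await?;      // cargo run -- up
    Migrator::up(db, Some(10)).await?;  // cargo run -- up -n 10
    Migrator::down(db, Some(1)).await?; // cargo run -- down
    Migrator::fresh(db).await?;         // cargo run -- fresh
    Migrator::refresh(db).await?;       // cargo run -- refresh
    Migrator::reset(db).await?;         // cargo run -- reset
    Migrator::status(db).await?;        // cargo run -- status
    Ok(())
}
```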

dpts-migration/src/lib.rs Normal file

@@ -0,0 +1,15 @@
pub use sea_orm_migration::prelude::*;

mod m20220101_000001_create_table;

pub struct Migrator;

#[async_trait::async_trait]
impl MigratorTrait for Migrator {
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![
            Box::new(m20220101_000001_create_table::Migration),
        ]
    }
}
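
A migration generated later with `cargo run -- generate` would be registered here by declaring its module and pushing one more boxed entry. Purely as illustration; the module name below is made up:

```rust
// Illustrative only: how lib.rs would look after adding a second migration.
// The module name m20250601_000001_add_example is hypothetical.
mod m20250601_000001_add_example;

#[async_trait::async_trait]
impl MigratorTrait for Migrator {
    fn migrations() -> Vec<Box<dyn MigrationTrait>> {
        vec![
            Box::new(m20220101_000001_create_table::Migration),
            Box::new(m20250601_000001_add_example::Migration),
        ]
    }
}
```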

dpts-migration/src/m20220101_000001_create_table.rs Normal file

@@ -0,0 +1,174 @@
use sea_orm_migration::{prelude::*, schema::*};

#[derive(DeriveMigrationName)]
pub struct Migration;

#[async_trait::async_trait]
impl MigrationTrait for Migration {
    async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        manager.create_table(
            Table::create()
                .table(User::Table)
                .if_not_exists()
                .col(pk_auto(User::Id))
                .col(string_uniq(User::LoginName))
                .col(string(User::PasswordHash))
                .col(timestamp_with_time_zone(User::CreatedAt))
                .col(timestamp_with_time_zone(User::UpdatedAt))
                .to_owned(),
        ).await?;
        manager.create_table(
            Table::create()
                .table(RecordHeader::Table)
                .if_not_exists()
                .col(pk_auto(RecordHeader::Id))
                .col(integer(RecordHeader::UserId))
                .col(timestamp_with_time_zone(RecordHeader::RecordedAt))
                .col(timestamp_with_time_zone(RecordHeader::CreatedAt))
                .col(timestamp_with_time_zone(RecordHeader::UpdatedAt))
                .col(string(RecordHeader::Comment))
                .foreign_key(
                    ForeignKey::create()
                        .name("FK_RecordHeader_User")
                        .from(RecordHeader::Table, RecordHeader::UserId)
                        .to(User::Table, User::Id)
                        .on_delete(ForeignKeyAction::Cascade)
                        .on_update(ForeignKeyAction::Cascade)
                )
                .to_owned(),
        ).await?;
        manager.create_table(
            Table::create()
                .table(RecordTag::Table)
                .if_not_exists()
                .col(pk_auto(RecordTag::Id))
                .col(integer(RecordTag::UserId))
                .col(string(RecordTag::Name))
                .foreign_key(
                    ForeignKey::create()
                        .name("FK_RecordTag_User")
                        .from(RecordTag::Table, RecordTag::UserId)
                        .to(User::Table, User::Id)
                        .on_delete(ForeignKeyAction::Cascade)
                        .on_update(ForeignKeyAction::Cascade)
                )
                .to_owned(),
        ).await?;
        manager.create_table(
            Table::create()
                .table(RecordDetail::Table)
                .if_not_exists()
                .col(pk_auto(RecordDetail::Id))
                .col(integer(RecordDetail::RecordHeaderId))
                .col(integer(RecordDetail::RecordTagId))
                .col(string(RecordDetail::Count))
                .foreign_key(
                    ForeignKey::create()
                        .name("FK_RecordDetail_RecordHeader")
                        .from(RecordDetail::Table, RecordDetail::RecordHeaderId)
                        .to(RecordHeader::Table, RecordHeader::Id)
                        .on_delete(ForeignKeyAction::Cascade)
                        .on_update(ForeignKeyAction::Cascade)
                )
                .foreign_key(
                    ForeignKey::create()
                        .name("FK_RecordDetail_RecordTag")
                        .from(RecordDetail::Table, RecordDetail::RecordTagId)
                        .to(RecordTag::Table, RecordTag::Id)
                        .on_delete(ForeignKeyAction::Cascade)
                        .on_update(ForeignKeyAction::Cascade)
                )
                .to_owned(),
        ).await?;
        manager.create_index(
            Index::create()
                .name("IDX_User_LoginName")
                .table(User::Table)
                .col(User::LoginName)
                .to_owned(),
        ).await?;
        manager.create_index(
            Index::create()
                .name("IDX_RecordHeader_RecordedAt")
                .table(RecordHeader::Table)
                .col(RecordHeader::RecordedAt)
                .to_owned(),
        ).await?;
        manager.create_index(
            Index::create()
                .name("IDX_RecordTag_Name")
                .table(RecordTag::Table)
                .col(RecordTag::Name)
                .to_owned(),
        ).await?;
        Ok(())
    }

    async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> {
        // Replace the sample below with your own migration scripts
        manager.drop_table(
            Table::drop().table(RecordDetail::Table).to_owned()
        ).await?;
        manager.drop_table(
            Table::drop().table(RecordTag::Table).to_owned()
        ).await?;
        manager.drop_table(
            Table::drop().table(RecordHeader::Table).to_owned()
        ).await?;
        manager.drop_table(
            Table::drop().table(User::Table).to_owned()
        ).await?;
        Ok(())
    }
}

#[derive(DeriveIden)]
enum User {
    Table,
    Id,
    CreatedAt,
    UpdatedAt,
    LoginName,
    PasswordHash,
}

#[derive(DeriveIden)]
enum RecordHeader {
    Table,
    Id,
    UserId,
    CreatedAt,
    UpdatedAt,
    RecordedAt,
    Comment,
}

#[derive(DeriveIden)]
enum RecordTag {
    Table,
    Id,
    UserId,
    Name,
}

#[derive(DeriveIden)]
enum RecordDetail {
    Table,
    Id,
    RecordHeaderId,
    RecordTagId,
    Count,
}
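
No SeaORM entities are added in this commit. For orientation only, a hedged sketch of what an entity for the `user` table created above could look like, following SeaORM's usual `DeriveEntityModel` pattern; none of this code is in the commit:

```rust
// Hypothetical entity for the `user` table; not part of this commit.
use sea_orm::entity::prelude::*;

#[derive(Clone, Debug, PartialEq, DeriveEntityModel)]
#[sea_orm(table_name = "user")]
pub struct Model {
    #[sea_orm(primary_key)]
    pub id: i32,
    #[sea_orm(unique)]
    pub login_name: String,
    pub password_hash: String,
    pub created_at: DateTimeWithTimeZone,
    pub updated_at: DateTimeWithTimeZone,
}

// No relations declared in this sketch.
#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)]
pub enum Relation {}

impl ActiveModelBehavior for ActiveModel {}
```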

dpts-migration/src/main.rs Normal file

@@ -0,0 +1,6 @@
use sea_orm_migration::prelude::*;

#[async_std::main]
async fn main() {
    cli::run_cli(dpts_migration::Migrator).await;
}