WIP: v1.0.0
This commit is contained in:
parent
b1efc76c7c
commit
5cbab43a23
7 changed files with 4104 additions and 0 deletions
3881
Cargo.lock
generated
Normal file
3881
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
19
Cargo.toml
Normal file
19
Cargo.toml
Normal file
|
@@ -0,0 +1,19 @@
|
|||
[package]
name = "minna_caos"
version = "0.1.0"
edition = "2024"

[dependencies]
# Async SQLite access. NOTE(review): not referenced by the sources in this
# commit yet — presumably for the migrations/ schema; confirm before removing.
sqlx = { version = "0.7.4", features = ["runtime-tokio-rustls", "sqlite"] }
# HTTP server framework (used by src/http_api.rs).
rocket = { version = "0.5.1", default-features = false, features = ["http2", "json"] }
# Rocket-managed sqlx pools. Not referenced by the visible sources yet.
rocket_db_pools = { version = "0.2.0", features = ["sqlx_sqlite"] }
# Storage backend abstraction. Not referenced by the visible sources yet.
opendal = { version = "0.52.0", features = ["services-fs"] }
# Async runtime (multi-threaded; `macros` provides #[tokio::main] in src/main.rs).
tokio = { version = "1.44.1", features = ["rt-multi-thread", "macros", "parking_lot"] }
# Error reporting (color_eyre::install + Result/eyre! in src/main.rs).
color-eyre = "0.6.3"
# Logging facade plus env-configured backend (env_logger::init in src/main.rs).
log = "0.4.26"
env_logger = "0.11.7"
# Layered configuration: TOML file merged with environment (src/config.rs).
figment = { version = "0.10.19", features = ["env", "toml", "parking_lot"] }
serde = { version = "1.0.219", features = ["derive"] }
# Declarative struct validation (#[derive(Validate)] in src/config.rs).
validator = { version = "0.20.0", features = ["derive"] }
# Lazily-initialized statics (BUCKET_ID_PATTERN in src/config.rs).
once_cell = "1.21.1"
regex = "1.11.1"
|
16
migrations/20250321201214_initial.sql
Normal file
16
migrations/20250321201214_initial.sql
Normal file
|
@@ -0,0 +1,16 @@
|
|||
-- Objects known to the system, keyed by content hash.
create table objects
(
    hash text not null,
    media_type text not null,
    creation_date text not null, -- RFC 3339 format
    primary key (hash)
) without rowid, strict;

-- Which bucket(s) hold (or are expected to hold) a copy of each object.
-- Rows survive object movement; `is_present` marks whether the copy exists.
create table object_replicas
(
    hash text not null,
    bucket_id text not null,
    is_present integer not null, -- boolean
    primary key (hash, bucket_id),
    foreign key (hash) references objects (hash) on delete restrict on update restrict
) strict;
|
8
run/config.toml
Normal file
8
run/config.toml
Normal file
|
@@ -0,0 +1,8 @@
|
|||
# Bind address and port for the HTTP API server (see Config in src/config.rs).
http_address = "0.0.0.0"
http_port = 8001

# One [[buckets]] table per storage bucket. The `backend` key selects the
# backend variant; the remaining keys (here `path`) belong to that backend.
[[buckets]]
id = "local"
display_name = "Local"
backend = "filesystem"
# Object directory; created on startup if missing (see initialize_buckets).
# NOTE(review): a relative path resolves against the process working directory.
path = "./data"
|
76
src/config.rs
Normal file
76
src/config.rs
Normal file
|
@@ -0,0 +1,76 @@
|
|||
use color_eyre::Result;
|
||||
use color_eyre::eyre::WrapErr;
|
||||
use figment::Figment;
|
||||
use figment::providers::Format;
|
||||
use once_cell::sync::Lazy;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashSet;
|
||||
use std::net::IpAddr;
|
||||
use std::path::PathBuf;
|
||||
use validator::{Validate, ValidationError};
|
||||
|
||||
/// Top-level application configuration, extracted by `figment` in [`load_config`].
#[derive(Debug, Serialize, Deserialize, Validate)]
pub struct Config {
    /// Address the HTTP API server binds to (see `start_http_api_server`).
    pub http_address: IpAddr,
    /// Port the HTTP API server listens on.
    pub http_port: u16,
    /// When true, the client IP is read from the `X-Forwarded-For` header.
    /// `#[serde(default)]` makes it optional in the config file (false if absent).
    #[serde(default)]
    pub trust_http_reverse_proxy: bool,
    /// Bucket definitions: each element is validated (`nested`) and the whole
    /// list is checked for duplicate IDs by `validate_buckets`.
    #[validate(nested, custom(function = "validate_buckets"))]
    pub buckets: Vec<ConfigBucket>,
}
|
||||
|
||||
fn validate_buckets(buckets: &Vec<ConfigBucket>) -> Result<(), ValidationError> {
|
||||
let mut ids = HashSet::new();
|
||||
|
||||
for bucket_config in buckets {
|
||||
if !ids.insert(&bucket_config.id) {
|
||||
return Err(ValidationError::new("duplicate_id").with_message(
|
||||
format!(
|
||||
"There is more than one bucket with this ID: {}",
|
||||
bucket_config.id
|
||||
)
|
||||
.into(),
|
||||
));
|
||||
};
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Bucket IDs must consist entirely of word characters. The pattern must be
// anchored on BOTH ends: the previous `\w*$` always matched (an empty `\w*`
// match just before end-of-string succeeds on any input), so the regex
// validation on `ConfigBucket::id` was a no-op. `^\w+$` accepts exactly the
// intended identifiers; the empty string is additionally rejected by the
// `length(min = 1)` rule.
static BUCKET_ID_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"^\w+$").unwrap());
|
||||
|
||||
/// Configuration for a single storage bucket.
#[derive(Debug, Serialize, Deserialize, Validate)]
pub struct ConfigBucket {
    /// Unique identifier: 1-32 characters matching `BUCKET_ID_PATTERN`.
    /// Cross-bucket uniqueness is enforced by `validate_buckets`.
    #[validate(length(min = 1, max = 32), regex(path = *BUCKET_ID_PATTERN))]
    pub id: String,
    /// Human-readable name, 1-128 characters.
    #[validate(length(min = 1, max = 128))]
    pub display_name: String,
    /// Backend selection and settings; `flatten` keeps the backend's keys
    /// (e.g. `backend = "filesystem"`, `path = …`) at the bucket table level.
    #[serde(flatten)]
    pub backend: ConfigBucketBackend,
}
|
||||
|
||||
/// Storage backend variants, chosen by the `backend` key of a bucket's config
/// table (internally-tagged enum, snake_case tag values).
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "backend", rename_all = "snake_case")]
pub enum ConfigBucketBackend {
    /// `backend = "filesystem"`: objects live in a local directory.
    Filesystem(ConfigBucketBackendFilesystem),
}
|
||||
|
||||
/// Settings for the filesystem backend.
#[derive(Debug, Serialize, Deserialize)]
pub struct ConfigBucketBackendFilesystem {
    /// Object directory; created if missing and canonicalized during startup
    /// (see `initialize_buckets` in src/main.rs).
    pub path: PathBuf,
}
|
||||
|
||||
pub fn load_config() -> Result<Config> {
|
||||
let figment = Figment::new()
|
||||
.merge(figment::providers::Toml::file("config.toml"))
|
||||
.merge(figment::providers::Env::raw().only(&["HTTP_ADDRESS", "HTTP_PORT"]));
|
||||
|
||||
let config = figment
|
||||
.extract::<Config>()
|
||||
.wrap_err("Failed to load configuration.")?;
|
||||
config
|
||||
.validate()
|
||||
.wrap_err("Failed to validate configuration.")?;
|
||||
Ok(config)
|
||||
}
|
25
src/http_api.rs
Normal file
25
src/http_api.rs
Normal file
|
@@ -0,0 +1,25 @@
|
|||
use crate::config::Config;
|
||||
use color_eyre::Result;
|
||||
|
||||
pub async fn start_http_api_server(config: &Config) -> Result<()> {
|
||||
let rocket_app = rocket::custom(rocket::config::Config {
|
||||
address: config.http_address,
|
||||
port: config.http_port,
|
||||
ident: rocket::config::Ident::try_new("minna-caos".to_owned()).unwrap(),
|
||||
ip_header: if config.trust_http_reverse_proxy {
|
||||
Some("X-Forwarded-For".into())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
shutdown: rocket::config::Shutdown {
|
||||
grace: 5,
|
||||
mercy: 5,
|
||||
..rocket::config::Shutdown::default()
|
||||
},
|
||||
keep_alive: 10,
|
||||
..rocket::Config::default()
|
||||
});
|
||||
|
||||
rocket_app.launch().await?;
|
||||
Ok(())
|
||||
}
|
79
src/main.rs
Normal file
79
src/main.rs
Normal file
|
@@ -0,0 +1,79 @@
|
|||
mod config;
|
||||
mod http_api;
|
||||
|
||||
use crate::config::{ConfigBucket, ConfigBucketBackend, load_config};
|
||||
use crate::http_api::start_http_api_server;
|
||||
use color_eyre::Result;
|
||||
use color_eyre::eyre::{WrapErr, eyre};
|
||||
use std::collections::HashSet;
|
||||
use std::io::ErrorKind;
|
||||
use tokio::fs;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
color_eyre::install().unwrap();
|
||||
env_logger::init();
|
||||
|
||||
let config = load_config()?;
|
||||
|
||||
log::debug!("Loaded configuration: {:#?}", config);
|
||||
initialize_buckets(&config.buckets).await?;
|
||||
|
||||
log::info!("Initialization successful.");
|
||||
|
||||
start_http_api_server(&config).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn initialize_buckets(bucket_configs: &Vec<ConfigBucket>) -> Result<()> {
|
||||
let mut filesystem_backend_paths = HashSet::new();
|
||||
|
||||
for bucket_config in bucket_configs {
|
||||
log::info!("Initializing bucket: {}", bucket_config.id);
|
||||
|
||||
match &bucket_config.backend {
|
||||
ConfigBucketBackend::Filesystem(filesystem_backend_config) => {
|
||||
let path = match filesystem_backend_config.path.canonicalize() {
|
||||
Ok(path) => path,
|
||||
Err(error) if error.kind() == ErrorKind::NotFound => {
|
||||
fs::create_dir_all(&filesystem_backend_config.path)
|
||||
.await
|
||||
.wrap_err_with(|| {
|
||||
format!(
|
||||
"Could not create directory: {}",
|
||||
filesystem_backend_config.path.to_string_lossy()
|
||||
)
|
||||
})?;
|
||||
|
||||
filesystem_backend_config.path.canonicalize()?
|
||||
}
|
||||
Err(error) => return Err(error.into()),
|
||||
};
|
||||
|
||||
if filesystem_backend_paths.contains(&path) {
|
||||
return Err(eyre!(
|
||||
"More than one bucket using the filesystem backend is configured to use this path: {}",
|
||||
path.to_string_lossy()
|
||||
));
|
||||
}
|
||||
|
||||
let write_test_file_path = path.join("./minna-caos-write-test");
|
||||
let _ = fs::File::create(&write_test_file_path)
|
||||
.await
|
||||
.wrap_err_with(|| {
|
||||
format!(
|
||||
"The write test file for the {} bucket failed.",
|
||||
&bucket_config.id
|
||||
)
|
||||
})?;
|
||||
|
||||
fs::remove_file(write_test_file_path).await?;
|
||||
|
||||
filesystem_backend_paths.insert(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
Loading…
Add table
Reference in a new issue