WIP: v1.0.0

Moritz Ruth 2025-03-29 23:54:55 +01:00
parent cc84c6ac37
commit a811f7750a
Signed by: moritzruth
GPG key ID: C9BBAB79405EE56D
4 changed files with 71 additions and 17 deletions

@@ -27,6 +27,7 @@
 - outgoing webhooks
 
 ## Upload steps
 
 - client to app: request to upload something, returns `{upload_id}`
@@ -35,3 +36,12 @@
 - client to caos: `PATCH /uploads/{upload_id}` with upload data, optionally using tus
 - app to caos: `GET /staging-area/{upload_id}`, returns metadata (including `{hash}`) as soon as the upload is complete
 - app to caos: `POST /staging-area/{upload_id}/accept` with target bucket IDs
+
+## Roadmap
+
+- basic uploading
+- upload expiration
+- media type detection
+- metadata endpoints
+- accepting uploads
+- more storage backends
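
The steps above describe the upload handshake between client, app, and caos. A minimal sketch of the app-side half (the last two steps), assuming reqwest with the json feature as the HTTP client; only the paths come from the steps above, while the base URL, the response shape beyond `hash`, and the `buckets` field name are illustrative assumptions:

```rust
use serde::Deserialize;

// Hypothetical response shape; only `hash` is named in the steps above.
#[derive(Deserialize)]
struct StagedUpload {
    hash: String,
}

// App-side half of the flow: read the staged upload's metadata once it is
// complete, then accept it into one or more buckets. `caos_base` and the
// `buckets` field name are placeholders.
async fn accept_upload(caos_base: &str, upload_id: &str) -> Result<String, reqwest::Error> {
    let client = reqwest::Client::new();

    // app to caos: GET /staging-area/{upload_id}
    let staged: StagedUpload = client
        .get(format!("{caos_base}/staging-area/{upload_id}"))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    // app to caos: POST /staging-area/{upload_id}/accept with the target bucket IDs
    client
        .post(format!("{caos_base}/staging-area/{upload_id}/accept"))
        .json(&serde_json::json!({ "buckets": ["images"] }))
        .send()
        .await?
        .error_for_status()?;

    Ok(staged.hash)
}
```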

@@ -1,8 +1,8 @@
 create table objects
 (
-    hash text not null,
+    hash text not null, -- BLAKE3, 256 bits, base 16
     size integer not null, -- in bytes
-    media_type text not null,
+    media_type text not null, -- RFC 6838 format
     creation_date text not null, -- RFC 3339 format
     primary key (hash)
 ) without rowid, strict;
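
For reference, a value matching the `hash` column comment (BLAKE3, 256 bits, hex-encoded) could be produced with the blake3 crate roughly as below; the incremental `Hasher` is shown because uploads arrive in chunks, which is an assumption about how caos will compute it:

```rust
use blake3::Hasher;

// Hash upload data incrementally and render it as 64 lowercase hex characters,
// matching the `hash text` column described above.
fn hash_chunks<'a>(chunks: impl IntoIterator<Item = &'a [u8]>) -> String {
    let mut hasher = Hasher::new();
    for chunk in chunks {
        hasher.update(chunk);
    }
    hasher.finalize().to_hex().to_string()
}
```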
@@ -16,13 +16,19 @@ create table object_replicas
     foreign key (hash) references objects (hash) on delete restrict on update restrict
 ) strict;
 
-create table uploads
+create table ongoing_uploads
 (
     id text not null,
     current_size integer not null, -- in bytes
     total_size integer, -- in bytes, or null if the upload was not started yet
-    hash text,
-    primary key (id),
-    foreign key (hash) references objects (hash) on delete restrict on update restrict
-)
+    primary key (id)
+) without rowid, strict;
+
+create table finished_uploads
+(
+    id text not null,
+    size integer not null, -- in bytes
+    hash text not null, -- BLAKE3, 256 bits, base 16
+    media_type text not null, -- RFC 6838 format
+    primary key (id)
+) without rowid, strict;
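
The split into ongoing_uploads and finished_uploads suggests a hand-over step once the last chunk arrives. That step is not part of this commit; a sketch of how it might look with sqlx, assuming the hash and media type have already been computed:

```rust
use sqlx::SqlitePool;

// Hypothetical completion step, sketched against the schema above: record the
// finished upload and drop the ongoing row in a single transaction.
async fn finish_upload(
    pool: &SqlitePool,
    id: &str,
    size: i64,
    hash: &str,
    media_type: &str,
) -> Result<(), sqlx::Error> {
    let mut tx = pool.begin().await?;

    sqlx::query("INSERT INTO finished_uploads (id, size, hash, media_type) VALUES (?, ?, ?, ?)")
        .bind(id)
        .bind(size)
        .bind(hash)
        .bind(media_type)
        .execute(&mut *tx)
        .await?;

    sqlx::query("DELETE FROM ongoing_uploads WHERE id = ?")
        .bind(id)
        .execute(&mut *tx)
        .await?;

    tx.commit().await?;
    Ok(())
}
```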

@@ -42,7 +42,8 @@ fn validate_buckets(buckets: &Vec<ConfigBucket>) -> Result<(), ValidationError>
     Ok(())
 }
 
-static BUCKET_ID_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"\w*$").unwrap());
+// a-zA-Z0-9 and _, but not "staging"
+static BUCKET_ID_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(?!staging$)\w*$").unwrap());
 
 #[derive(Debug, Serialize, Deserialize, Validate)]
 pub struct ConfigBucket {
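
One caveat, depending on which crate backs `Regex` here: the standard regex crate rejects look-around such as `(?!staging$)` when the pattern is compiled, so this `unwrap` would panic unless something like fancy_regex is in use. If plain regex is intended, a sketch of the same rule keeps the pattern simple and checks the reserved name separately (the `+` requiring a non-empty ID is an added assumption):

```rust
use once_cell::sync::Lazy;
use regex::Regex;

// a-zA-Z0-9 and _ only; the "staging" exclusion is a plain comparison because
// the regex crate has no look-ahead.
static BUCKET_ID_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[a-zA-Z0-9_]+$").unwrap());

fn is_valid_bucket_id(id: &str) -> bool {
    id != "staging" && BUCKET_ID_PATTERN.is_match(id)
}
```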

@@ -6,9 +6,12 @@ use rocket::form::validate::Len;
 use rocket::http::Status;
 use rocket::outcome::Outcome::Success;
 use rocket::request::{FromRequest, Outcome};
-use rocket::{Request, State, post, routes};
-use serde::Deserialize;
+use rocket::response::Responder;
+use rocket::serde::json::Json;
+use rocket::{Request, State, post, response, routes};
+use serde::{Deserialize, Serialize};
 use sqlx::SqlitePool;
+use std::borrow::Cow;
 
 pub async fn start_http_api_server(config: &Config, database: SqlitePool) -> Result<()> {
     let rocket_app = rocket::custom(rocket::config::Config {
@@ -39,22 +42,56 @@ pub async fn start_http_api_server(config: &Config, database: SqlitePool) -> Result<()> {
     Ok(())
 }
 
-#[derive(Debug, Deserialize)]
-struct CreateUploadRequest {}
-
-#[post("/uploads")]
-async fn create_upload(_accessor: AuthorizedApiAccessor, database: &State<SqlitePool>) {
-    let total_size = 20;
+#[derive(Debug)]
+enum ApiError {
+    BodyValidationFailed {
+        path: Cow<'static, str>,
+        message: Cow<'static, str>,
+    },
+}
+
+impl<'r> Responder<'r, 'static> for ApiError {
+    fn respond_to(self, _: &Request<'_>) -> response::Result<'static> {
+        todo!()
+    }
+}
+
+#[derive(Debug, Deserialize)]
+struct CreateUploadRequest {
+    size: u64,
+}
+
+#[derive(Debug, Serialize)]
+struct CreateUploadResponse {
+    upload_id: String,
+}
+
+#[post("/uploads", data = "<request>")]
+async fn create_upload(
+    _accessor: AuthorizedApiAccessor,
+    database: &State<SqlitePool>,
+    request: Json<CreateUploadRequest>,
+) -> Result<Json<CreateUploadResponse>, ApiError> {
     let id = nanoid!();
+    let total_size: i64 = request
+        .size
+        .try_into()
+        .map_err(|_| ApiError::BodyValidationFailed {
+            path: "size".into(),
+            message: "".into(),
+        })?;
+
     sqlx::query!(
-        "INSERT INTO uploads (id, current_size, total_size, hash) VALUES(?, 0, ?, null)",
+        "INSERT INTO ongoing_uploads (id, total_size, current_size) VALUES(?, ?, 0)",
         id,
         total_size
     )
     .execute(database.inner())
     .await
     .unwrap();
+
+    Ok(Json(CreateUploadResponse { upload_id: id }))
 }
 
 struct CorrectApiSecret(FStr<64>);
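
The `respond_to` body is still a `todo!()`. A sketch of how it could be filled in, assuming a JSON error body and 422 Unprocessable Entity for validation failures (both assumptions, not taken from this commit); the enum is repeated so the snippet stands alone:

```rust
use rocket::Request;
use rocket::http::Status;
use rocket::response::{self, Responder};
use rocket::serde::json::Json;
use serde::Serialize;
use std::borrow::Cow;

#[derive(Debug)]
enum ApiError {
    BodyValidationFailed {
        path: Cow<'static, str>,
        message: Cow<'static, str>,
    },
}

// Hypothetical wire format for error responses.
#[derive(Serialize)]
struct ErrorBody {
    error: &'static str,
    path: String,
    message: String,
}

impl<'r> Responder<'r, 'static> for ApiError {
    fn respond_to(self, request: &Request<'_>) -> response::Result<'static> {
        match self {
            ApiError::BodyValidationFailed { path, message } => {
                let body = Json(ErrorBody {
                    error: "body_validation_failed",
                    path: path.into_owned(),
                    message: message.into_owned(),
                });
                // Delegate to the (Status, Json<T>) responder that Rocket provides.
                (Status::UnprocessableEntity, body).respond_to(request)
            }
        }
    }
}
```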