Compare commits

..

29 Commits

Author SHA1 Message Date
38a30e1898 feat: WIP overview for user 2025-07-03 21:57:26 +02:00
2ec200831f refactor: move overview into calendar 2025-07-03 21:28:47 +02:00
d1e067407b feat: validation of new availability 2025-07-03 21:16:43 +02:00
b42540ac2f feat: show weekday in overview and edit pages
refs #33
2025-07-03 20:15:56 +02:00
45cf6dda10 feat: help for preparing sqlx query data 2025-07-03 17:58:47 +02:00
428f46b853 refactor: clothing and event changeset 2025-07-02 18:56:40 +02:00
bdaf8ff20e feat: finish implementing assignment validation 2025-07-02 10:14:40 +02:00
512b061c7a test: WIP new assignment 2025-07-01 16:40:40 +02:00
2abeeb20df test: new assignment 2025-06-30 14:57:58 +02:00
10e6ba80a2 refactor: assignment validation 2025-06-30 11:28:01 +02:00
e5df98a515 refactor: rename validation trait 2025-06-29 19:48:56 +02:00
93574c3ac5 refactor: move migrations to db folder 2025-06-27 12:11:17 +02:00
9893c37f80 refactor: finished moving models and validation into own crate 2025-06-23 23:00:54 +02:00
f35b343768 refactor: WIP splitting crates 2025-06-22 22:54:11 +02:00
f25e508bbd fix: editing availability 2025-06-22 22:37:12 +02:00
b65b4c7a00 feat: validate availabilit changeset 2025-06-22 22:37:12 +02:00
b2969b988d refactor: custom context for validation 2025-06-22 21:33:01 +02:00
9666932915 feat: custom async validation 2025-06-19 16:24:27 +02:00
0b4248604a doc: bump version 2025-06-15 22:08:15 +02:00
5afaac6197 chore: update npm packages 2025-06-15 22:07:48 +02:00
95f807b51d style: profile and logout button 2025-06-15 22:03:53 +02:00
cca925f4eb feat: enforce implicit lowercase email address 2025-06-15 21:53:31 +02:00
90ac5c306d chore: remove obsolete spec file 2025-06-15 18:42:14 +02:00
03964d3542 refactor: add hostname to customizations 2025-06-10 09:15:27 +02:00
2774c6e48a doc: update readme for release 2025-06-10 09:15:09 +02:00
7f5941ba6a fix: failing tests 2025-06-09 20:03:12 +02:00
e591b419bb feat: calculate dates for export events 2025-06-09 19:26:57 +02:00
784b7cea4e fix: export for area manager 2025-06-09 18:47:16 +02:00
2b9e6cfefd feat: increase registration expiry to 5 days 2025-06-09 14:23:26 +02:00
234 changed files with 3058 additions and 1706 deletions

View File

@ -6,11 +6,14 @@ SQLX_OFFLINE=true
# 64 byte long openssl rand -base64 64 # 64 byte long openssl rand -base64 64
SECRET_KEY="changeInProdOrHandAb11111111111111111111111111111111111111111111" SECRET_KEY="changeInProdOrHandAb11111111111111111111111111111111111111111111"
HOSTNAME="localhost" HOSTNAME="localhost"
WEBMASTER_EMAIL="admin@example.com"
SERVER_ADDRESS="127.0.0.1" SERVER_ADDRESS="127.0.0.1"
SERVER_PORT="8080" SERVER_PORT="8080"
APP_ENVIRONMENT="development"
SMTP_SERVER="localhost" SMTP_SERVER="localhost"
SMTP_PORT="1025" SMTP_PORT="1025"
# SMTP_LOGIN="" # SMTP_LOGIN=""
# SMTP_PASSWORD="" # SMTP_PASSWORD=""
SMTP_TLSTYPE="none" SMTP_TLSTYPE="none"
RUST_LOG="info,brass_web=trace,brass_db=trace"

79
Cargo.lock generated
View File

@ -775,6 +775,7 @@ dependencies = [
"anyhow", "anyhow",
"async-std", "async-std",
"brass-config", "brass-config",
"chrono",
"clap", "clap",
"sqlx", "sqlx",
] ]
@ -787,6 +788,19 @@ dependencies = [
"dotenvy", "dotenvy",
] ]
[[package]]
name = "brass-db"
version = "0.1.0"
dependencies = [
"chrono",
"fake",
"rand 0.9.1",
"regex",
"serde",
"sqlx",
"tracing",
]
[[package]] [[package]]
name = "brass-macros" name = "brass-macros"
version = "0.1.0" version = "0.1.0"
@ -797,7 +811,7 @@ dependencies = [
[[package]] [[package]]
name = "brass-web" name = "brass-web"
version = "1.0.0" version = "1.0.1"
dependencies = [ dependencies = [
"actix-files", "actix-files",
"actix-http", "actix-http",
@ -809,13 +823,13 @@ dependencies = [
"argon2", "argon2",
"askama", "askama",
"brass-config", "brass-config",
"brass-db",
"brass-macros", "brass-macros",
"built", "built",
"change-detection", "change-detection",
"chrono", "chrono",
"fake", "fake",
"futures-util", "futures-util",
"garde",
"insta", "insta",
"lettre", "lettre",
"maud", "maud",
@ -890,15 +904,6 @@ dependencies = [
"bytes", "bytes",
] ]
[[package]]
name = "castaway"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5"
dependencies = [
"rustversion",
]
[[package]] [[package]]
name = "cc" name = "cc"
version = "1.2.22" version = "1.2.22"
@ -936,6 +941,7 @@ dependencies = [
"iana-time-zone", "iana-time-zone",
"js-sys", "js-sys",
"num-traits", "num-traits",
"pure-rust-locales",
"serde", "serde",
"wasm-bindgen", "wasm-bindgen",
"windows-link", "windows-link",
@ -1007,20 +1013,6 @@ version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
[[package]]
name = "compact_str"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b79c4069c6cad78e2e0cdfcbd26275770669fb39fd308a752dc110e83b9af32"
dependencies = [
"castaway",
"cfg-if",
"itoa",
"rustversion",
"ryu",
"static_assertions",
]
[[package]] [[package]]
name = "concurrent-queue" name = "concurrent-queue"
version = "2.5.0" version = "2.5.0"
@ -1643,31 +1635,6 @@ dependencies = [
"slab", "slab",
] ]
[[package]]
name = "garde"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a989bd2fd12136080f7825ff410d9239ce84a2a639487fc9d924ee42e2fb84f"
dependencies = [
"compact_str",
"garde_derive",
"once_cell",
"regex",
"smallvec",
]
[[package]]
name = "garde_derive"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f7f0545bbbba0a37d4d445890fa5759814e0716f02417b39f6fab292193df68"
dependencies = [
"proc-macro2",
"quote",
"regex",
"syn 2.0.101",
]
[[package]] [[package]]
name = "generic-array" name = "generic-array"
version = "0.14.7" version = "0.14.7"
@ -2677,6 +2644,12 @@ dependencies = [
"cc", "cc",
] ]
[[package]]
name = "pure-rust-locales"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1190fd18ae6ce9e137184f207593877e70f39b015040156b1e05081cdfe3733a"
[[package]] [[package]]
name = "quick-xml" name = "quick-xml"
version = "0.37.5" version = "0.37.5"
@ -3383,12 +3356,6 @@ dependencies = [
"path-slash", "path-slash",
] ]
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]] [[package]]
name = "stringprep" name = "stringprep"
version = "0.1.5" version = "0.1.5"

View File

@ -1,5 +1,5 @@
[workspace] [workspace]
members = [ "cli", "config", "macros", "web", ] members = [ "cli", "config", "db", "macros", "web", ]
resolver = "2" resolver = "2"
default-members = ["web"] default-members = ["web"]

100
README.md
View File

@ -1,69 +1,49 @@
# Brass
A webservice to plan and organize personnel deployment for [Brandsicherheitswachen](https://de.wikipedia.org/wiki/Brandsicherheitswache) (german; fire watch).
# Key Technologies
- [actix-web](https://actix.rs/)
- [sqlx](https://github.com/launchbadge/sqlx)
- [askama](https://github.com/askama-rs/askama)
- [lettre](https://lettre.rs/)
- [htmx](https://htmx.org/)
- [hyperscript](https://hyperscript.org/)
- [bulma](https://bulma.io/)
- great inspiration for project structure and tooling: [gerust.rs](https://gerust.rs)
# Getting started with developing # Getting started with developing
1. Clone the repository. 1. Clone the repository.
2. Install and configure Postgresql. Create a new database for brass: `createdb brass`. 2. Install and configure Postgresql. Create a new database for brass: `createdb brass`.
3. TODO: Configure DB name, DB user & pass, DB connection string, ... 3. Configure database connection string in `.env` config file.
4. Install sqlx-cli: `cargo install sqlx-cli` 4. Install required development tools `cargo install <tool>`
5. Migrate the database: `sqlx database setup` - sqlx-cli
6. Create superuser: `cargo r -- createadmin`
## Useful stuff
- cargo-watch, cargo-add
- mailtutan - mailtutan
- cargo-watch
- cargo-nextest
6. Migrate the development and test database: `cargo db migrate -e development` & `cargo db migrate -e test`
7. Create superuser: `cargo r -- createadmin`
8. Run and recompile application on file change: `cargo w`
9. Run tests via nextest and review possible snapshot changes: `cargo t`
# Build & Deploy
1. Clone the repository.
2. Build release `cargo b --release`.
3. Copy the artifact `target/release/brass-web` to the desired location. Make it executable `chmod +x brass-web`.
4. Create Postgresql database on the target host, configure your mail server.
5. Configuration for Brass is done via Environment Variables, see `.env` for a list.
6. Migrate the database `[LIST_OF_ENV_VARIABLES] brass-web migrate`.
7. Create a superuser `[LIST_OF_ENV_VARIABLES] brass-web createadmin`.
8. Create some sort of service file (systemd .service, openbsd rc.conf, ...) to run Brass in the background. Examples can be found in `docs/` directory.
## Example Deployment OpenBSD # Contributing & Issues
``` Code lies on my private gitea instance, thus there's no easy way for creating issues or making contributions. If you've got an issue or want to contribute, write me an email and we'll figure it out.
#!/bin/ksh
DATABASE_URL=postgresql://brass:pw@localhost/brass # Project Structure
SECRET_KEY="" - TODO
HOSTNAME="brass.tfld.de"
SERVER_ADDRESS="127.0.0.1"
SERVER_PORT="8081"
SMTP_SERVER="localhost"
SMTP_PORT="25"
SMTP_TLSTYPE="none"
ENVLIST="DATABASE_URL=$DATABASE_URL SECRET_KEY=$SECRET_KEY HOSTNAME=$HOSTNAME SERVER_ADDRESS=$SERVER_ADDRESS SERVER_PORT=$SERVER_PORT SMTP_SERVER=$SMTP_SERVER SMTP_PORT=$SMTP_PORT SMTP_TLSTYPE=$SMTP_TLSTYPE"
RUST_LOG="info,actix_server=error"
ENVLIST="DATABASE_URL=$DATABASE_URL SECRET_KEY=$SECRET_KEY HOSTNAME=$HOSTNAME SERVER_ADDRESS=$SERVER_ADDRESS SERVER_PORT=$SERVER_PORT SMTP_SERVER=$SMTP_SERVER SMTP_LOGIN=$SMTP_LOGIN SMTP_PASSWORD=$SMTP_PASSWORD SMTP_PORT=$SMTP_PORT SMTP_TLSTYPE=$SMTP_TLSTYPE RUST_LOG=$RUST_LOG" # Further Reading
More in depth documentation about design decisions, helpful commands and database schema can be found in `docs/` directory.
daemon="$ENVLIST /usr/local/bin/brass" # Copyright & License
daemon_user="www" Copyright 2025 Max Hohlfeld
daemon_logger="daemon.info" Brass is licensed under [GNU AGPLv3](https://www.gnu.org/licenses/agpl-3.0.en.html#license-text).
. /etc/rc.d/rc.subr
pexp=".*/usr/local/bin/brass.*"
rc_bg=YES
rc_cmd $1
```
```ini
# Postgres
# DATABASE_URL=postgres://postgres@localhost/my_database
# SQLite
DATABASE_URL=postgresql://brass:password@localhost/brass
# 64 byte long
SECRET_KEY="secret key"
HOSTNAME="brass.tfld.de"
ADDRESS="127.0.0.1"
PORT="8081"
SMTP_SERVER="localhost"
SMTP_PORT="25"
# SMTP_LOGIN=""
# SMTP_PASSWORD=""
SMTP_TLSTYPE="none"
```
## drop test databases
```bash
for dbname in $(psql -c "copy (select datname from pg_database where datname like 'brass_test_%') to stdout") ; do
echo "$dbname"
#dropdb -i "$dbname"
done
```

View File

@ -15,3 +15,4 @@ brass-config = { path = "../config" }
async-std = { version = "1.13.0", features = ["attributes"] } async-std = { version = "1.13.0", features = ["attributes"] }
sqlx = { version = "0.8.2", features = ["runtime-async-std", "postgres"] } sqlx = { version = "0.8.2", features = ["runtime-async-std", "postgres"] }
anyhow = "1.0.94" anyhow = "1.0.94"
chrono = "0.4.41"

View File

@ -1,6 +1,9 @@
use anyhow::Context; use anyhow::Context;
use chrono::Local;
use sqlx::migrate::Migrate; use sqlx::migrate::Migrate;
use sqlx::{migrate::Migrator, Executor}; use sqlx::{migrate::Migrator, Executor};
use std::fs::File;
use std::io::Write;
use std::{ use std::{
collections::HashMap, collections::HashMap,
path::{Path, PathBuf}, path::{Path, PathBuf},
@ -28,9 +31,14 @@ enum Command {
Reset, Reset,
#[command(about = "Run all pending migrations on database")] #[command(about = "Run all pending migrations on database")]
Migrate, Migrate,
#[command(about = "Create a new migration")]
NewMigration { title: String },
#[command(about = "Prepare sqlx query metadata for offline compile-time verification")]
Prepare,
} }
#[async_std::main] #[async_std::main]
#[allow(unused)]
async fn main() { async fn main() {
let cli = Cli::parse(); let cli = Cli::parse();
let config = load_config(&cli.environment).expect("Could not load config!"); let config = load_config(&cli.environment).expect("Could not load config!");
@ -42,7 +50,6 @@ async fn main() {
create_db(&db_config) create_db(&db_config)
.await .await
.expect("Failed creating database."); .expect("Failed creating database.");
migrate_db(&db_config) migrate_db(&db_config)
.await .await
.expect("Failed migrating database."); .expect("Failed migrating database.");
@ -51,20 +58,24 @@ async fn main() {
drop_db(&db_config) drop_db(&db_config)
.await .await
.expect("Failed dropping database."); .expect("Failed dropping database.");
create_db(&db_config) create_db(&db_config)
.await .await
.expect("Failed creating database."); .expect("Failed creating database.");
migrate_db(&db_config) migrate_db(&db_config)
.await .await
.expect("Failed migrating database."); .expect("Failed migrating database.");
}, }
Command::Migrate => { Command::Migrate => {
migrate_db(&db_config) migrate_db(&db_config)
.await .await
.expect("Failed migrating database."); .expect("Failed migrating database.");
} }
Command::NewMigration { title } => {
create_new_migration(&title)
.await
.expect("Failed creating new migration.");
}
Command::Prepare => prepare().await.expect("Failed preparing query metadata."),
} }
} }
@ -111,13 +122,7 @@ async fn migrate_db(db_config: &PgConnectOptions) -> anyhow::Result<()> {
.await .await
.context("Connection to database failed!")?; .context("Connection to database failed!")?;
let migrations_path = PathBuf::from( let migrations_path = db_package_root()?.join("migrations");
std::env::var("CARGO_MANIFEST_DIR").expect("This command needs to be invoked using cargo"),
)
.join("..")
.join("migrations")
.canonicalize()
.unwrap();
let migrator = Migrator::new(Path::new(&migrations_path)) let migrator = Migrator::new(Path::new(&migrations_path))
.await .await
@ -148,3 +153,56 @@ async fn migrate_db(db_config: &PgConnectOptions) -> anyhow::Result<()> {
Ok(()) Ok(())
} }
async fn create_new_migration(title: &str) -> anyhow::Result<()> {
let now = Local::now();
let timestamp = now.format("%Y%m%d%H%M%S");
let file_name = format!("{timestamp}_{title}.sql");
let path = db_package_root()?.join("migrations").join(&file_name);
let mut file = File::create(&path).context(format!(r#"Could not create file "{:?}""#, path))?;
file.write_all("".as_bytes())
.context(format!(r#"Could not write file "{:?}""#, path))?;
println!("Created migration {file_name}.");
Ok(())
}
async fn prepare() -> anyhow::Result<()> {
let cargo = std::env::var("CARGO")
.map_err(|_| anyhow::anyhow!("Please invoke me using Cargo, e.g.: `cargo db <ARGS>`"))
.expect("Existence of CARGO env var is asserted by calling `ensure_sqlx_cli_installed`");
let mut sqlx_prepare_command = {
let mut cmd = std::process::Command::new(&cargo);
cmd.args(["sqlx", "prepare", "--", "--all-targets", "--all-features"]);
let cmd_cwd = db_package_root().context("Error finding the root of the db package!")?;
cmd.current_dir(cmd_cwd);
cmd
};
let o = sqlx_prepare_command
.output()
.context("Could not run {cargo} sqlx prepare!")?;
if !o.status.success() {
let error = anyhow::anyhow!(String::from_utf8_lossy(&o.stdout).to_string()).context("Error generating query metadata. Are you sure the database is running and all migrations are applied?");
return Err(error);
}
println!("Query data written to db/.sqlx directory; please check this into version control.");
Ok(())
}
fn db_package_root() -> Result<PathBuf, anyhow::Error> {
Ok(PathBuf::from(
std::env::var("CARGO_MANIFEST_DIR").expect("This command needs to be invoked using cargo"),
)
.join("..")
.join("db")
.canonicalize()?)
}

View File

@ -0,0 +1,114 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n event.id AS eventId,\n event.startTimestamp,\n event.endTimestamp,\n event.name,\n event.locationId,\n event.voluntaryWachhabender,\n event.voluntaryFuehrungsassistent,\n event.amountOfPosten,\n event.clothing,\n event.canceled,\n event.note,\n location.id,\n location.name AS locationName,\n location.areaId AS locationAreaId,\n clothing.id AS clothingId,\n clothing.name AS clothingName\n FROM event\n JOIN location ON event.locationId = location.id\n JOIN clothing ON event.clothing = clothing.id\n WHERE starttimestamp::date >= $1\n AND starttimestamp::date <= $2\n AND location.areaId = $3;\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "eventid",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "starttimestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 2,
"name": "endtimestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 3,
"name": "name",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "locationid",
"type_info": "Int4"
},
{
"ordinal": 5,
"name": "voluntarywachhabender",
"type_info": "Bool"
},
{
"ordinal": 6,
"name": "voluntaryfuehrungsassistent",
"type_info": "Bool"
},
{
"ordinal": 7,
"name": "amountofposten",
"type_info": "Int2"
},
{
"ordinal": 8,
"name": "clothing",
"type_info": "Int4"
},
{
"ordinal": 9,
"name": "canceled",
"type_info": "Bool"
},
{
"ordinal": 10,
"name": "note",
"type_info": "Text"
},
{
"ordinal": 11,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 12,
"name": "locationname",
"type_info": "Text"
},
{
"ordinal": 13,
"name": "locationareaid",
"type_info": "Int4"
},
{
"ordinal": 14,
"name": "clothingid",
"type_info": "Int4"
},
{
"ordinal": 15,
"name": "clothingname",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Date",
"Date",
"Int4"
]
},
"nullable": [
false,
false,
false,
false,
false,
false,
false,
false,
false,
false,
true,
false,
false,
false,
false,
false
]
},
"hash": "10b4b80f351b66ac5e778a3031288ac5dc66efd0a66b38b7e30f4c954df91bdf"
}

View File

@ -0,0 +1,49 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n availability.id,\n availability.userId,\n availability.startTimestamp,\n availability.endTimestamp,\n availability.comment\n FROM availability\n WHERE availability.userId = $1\n AND (availability.endtimestamp = $2\n OR availability.starttimestamp = $3)\n AND (availability.id <> $4 OR $4 IS NULL);\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "userid",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "starttimestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 3,
"name": "endtimestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "comment",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4",
"Timestamptz",
"Timestamptz",
"Int4"
]
},
"nullable": [
false,
false,
false,
false,
true
]
},
"hash": "2288f64f63f07e7dd947c036e5c2be4c563788b3b988b721bd12797fd19a7a95"
}

View File

@ -0,0 +1,22 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id FROM user_ WHERE email = $1;",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false
]
},
"hash": "b4edfcac9404060d487db765b8c18ef8b7440699583e0bede95f4d214e668a87"
}

View File

@ -0,0 +1,48 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n availability.id,\n availability.userId,\n availability.startTimestamp,\n availability.endTimestamp,\n availability.comment\n FROM availability\n WHERE availability.userId = $1\n AND availability.starttimestamp::date >= $2\n AND availability.endtimestamp::date <= $3;\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "userid",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "starttimestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 3,
"name": "endtimestamp",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "comment",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4",
"Date",
"Date"
]
},
"nullable": [
false,
false,
false,
false,
true
]
},
"hash": "f60053118df6a791d31fa258ee3737881f8f97ca41cbebd92eb22c967292d2ee"
}

19
db/Cargo.toml Normal file
View File

@ -0,0 +1,19 @@
[package]
name = "brass-db"
version = "0.1.0"
edition = "2024"
license = "AGPL-3.0"
authors = ["Max Hohlfeld <maxhohlfeld@posteo.de>"]
publish = false
[dependencies]
sqlx = { version = "^0.8", features = ["runtime-async-std-rustls", "postgres", "chrono"] }
chrono = { version = "0.4.33", features = ["serde", "now"] }
serde = { version = "1", features = ["derive"] }
rand = { version = "0.9", features = ["os_rng"] }
regex = "1.11.1"
tracing = "0.1.41"
fake = { version = "4", features = ["chrono", "derive"], optional = true}
[features]
test-helpers = ["dep:fake"]

31
db/src/lib.rs Normal file
View File

@ -0,0 +1,31 @@
pub mod models;
mod support;
pub mod validation;
use std::error::Error;
use std::fmt::Display;
use chrono::NaiveTime;
pub use support::{NoneToken, Token};
// Inclusive day bounds used when expanding date-only values to timestamps.
const START_OF_DAY: NaiveTime = NaiveTime::from_hms_opt(0, 0, 0).unwrap();
// 23:59:59 — the last representable second of the day, not next midnight.
const END_OF_DAY: NaiveTime = NaiveTime::from_hms_opt(23, 59, 59).unwrap();
/// Error raised when a numeric discriminant cannot be mapped onto a variant
/// of the named enum.
#[derive(Debug)]
pub struct UnsupportedEnumValue {
    pub value: u8,
    pub enum_name: &'static str,
}

impl Display for UnsupportedEnumValue {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Self { value, enum_name } = self;
        write!(
            formatter,
            "unsupported enum value '{value}' given for enum '{enum_name}'"
        )
    }
}

impl Error for UnsupportedEnumValue {}

View File

@ -1,8 +1,9 @@
use chrono::NaiveDateTime; use chrono::NaiveDateTime;
use sqlx::{query, PgPool}; use sqlx::{PgPool, query};
use super::{assignment_changeset::AssignmentChangeset, Function, Result}; use super::{AssignmentChangeset, Function, Result};
#[derive(Debug)]
pub struct Assignment { pub struct Assignment {
pub event_id: i32, pub event_id: i32,
pub availability_id: i32, pub availability_id: i32,

View File

@ -0,0 +1,186 @@
use chrono::NaiveDateTime;
use sqlx::PgPool;
use tracing::debug;
use crate::validation::{
AsyncValidate, AsyncValidateError, start_date_time_lies_before_end_date_time,
};
use super::{Assignment, Availability, Event, Function, Role, User};
/// Candidate data for creating or updating an assignment of a user's
/// availability to an event position.
pub struct AssignmentChangeset {
    // Position the user shall fill (Posten, Fuehrungsassistent, Wachhabender).
    pub function: Function,
    // Assigned (start, end) time span.
    pub time: (NaiveDateTime, NaiveDateTime),
}
/// External state needed to validate an [`AssignmentChangeset`].
pub struct AssignmentContext<'a> {
    // Database handle for the async lookups performed during validation.
    pub pool: &'a PgPool,
    // User performing the assignment (used for the permission check).
    pub user: &'a User,
    pub event_id: i32,
    pub availability_id: i32,
}
impl<'a> AsyncValidate<'a> for AssignmentChangeset {
    type Context = AssignmentContext<'a>;

    /// Validates the changeset against database state: both referenced rows
    /// must exist, the acting user must be permitted, and the requested time
    /// and function must fit the availability and the event's open slots.
    async fn validate_with_context(
        &self,
        context: &'a Self::Context,
    ) -> Result<(), crate::validation::AsyncValidateError> {
        // The availability (including its user) must exist ...
        let Some(availability) =
            Availability::read_by_id_including_user(context.pool, context.availability_id).await?
        else {
            return Err(AsyncValidateError::new(
                "Angegebener Verfügbarkeit des Nutzers existiert nicht.",
            ));
        };

        // ... as must the event (including its location).
        let Some(event) =
            Event::read_by_id_including_location(context.pool, context.event_id).await?
        else {
            return Err(AsyncValidateError::new(
                "Angegebene Veranstaltung existiert nicht.",
            ));
        };

        // Individual checks below each return a descriptive error on failure;
        // the first failing check aborts validation.
        user_is_admin_or_area_manager_of_event_area(context.user, &event)?;
        availability_user_inside_event_area(&availability, &event)?;
        available_time_fits(&self.time, &availability)?;
        start_date_time_lies_before_end_date_time(&self.time.0, &self.time.1)?;
        availability_not_already_assigned(&self.time, &availability, &event, context.pool).await?;
        user_of_availability_has_function(&self.function, &availability)?;
        event_has_free_slot_for_function(&self.function, &availability, &event, context.pool)
            .await?;

        Ok(())
    }
}
/// Ensures the availability's user belongs to the same area as the event's
/// location. Both relations are expected to be loaded by the caller.
fn availability_user_inside_event_area(
    availability: &Availability,
    event: &Event,
) -> Result<(), AsyncValidateError> {
    let user_area = availability.user.as_ref().unwrap().area_id;
    let event_area = event.location.as_ref().unwrap().area_id;

    if user_area == event_area {
        Ok(())
    } else {
        Err(AsyncValidateError::new(
            "Nutzer der Verfügbarkeit ist nicht im gleichen Bereich wie der Ort der Veranstaltung.",
        ))
    }
}
/// Checks that the requested assignment span lies entirely within the
/// availability window.
fn available_time_fits(
    value: &(NaiveDateTime, NaiveDateTime),
    availability: &Availability,
) -> Result<(), AsyncValidateError> {
    let (start, end) = value;
    let fits = *start >= availability.start && *end <= availability.end;

    if !fits {
        return Err(AsyncValidateError::new(
            "Die verfügbar gemachte Zeit passt nicht zu der zugewiesenen Zeit für die Veranstaltung.",
        ));
    }

    Ok(())
}
/// Verifies the availability's user actually holds the function required for
/// the requested position.
fn user_of_availability_has_function(
    value: &Function,
    availability: &Availability,
) -> Result<(), AsyncValidateError> {
    let has_function = availability
        .user
        .as_ref()
        .unwrap()
        .function
        .contains(value);

    if has_function {
        Ok(())
    } else {
        Err(AsyncValidateError::new(
            "Nutzer der Verfügbarkeit besitzt nicht die benötigte Funktion um für diese Position zugewiesen zu werden.",
        ))
    }
}
/// Checks that `event` still has an open slot for `value`.
///
/// Assignments belonging to `availability` itself are excluded so an existing
/// assignment can be re-validated (updated) without counting against the limit.
async fn event_has_free_slot_for_function(
    value: &Function,
    availability: &Availability,
    event: &Event,
    pool: &PgPool,
) -> Result<(), AsyncValidateError> {
    debug!(?event, "event parameter");

    // All assignments of this event except those of the current availability.
    let assignments_for_event: Vec<Assignment> = Assignment::read_all_by_event(pool, event.id)
        .await?
        .into_iter()
        .filter(|a| a.availability_id != availability.id)
        .collect();

    debug!(?assignments_for_event, "existing assignments for event");

    let assignments_with_function = assignments_for_event
        .iter()
        .filter(|a| a.function == *value)
        .count();

    debug!(
        assignments_with_function,
        "amount of existing assignments for function"
    );

    // Capacity per function: Posten has a configurable amount; the other two
    // have at most one voluntary slot, and only when the event requests it.
    if match *value {
        Function::Posten => assignments_with_function >= event.amount_of_posten as usize,
        Function::Fuehrungsassistent => {
            event.voluntary_fuehrungsassistent && assignments_with_function >= 1
        }
        Function::Wachhabender => event.voluntary_wachhabender && assignments_with_function >= 1,
    } {
        return Err(AsyncValidateError::new(
            "Veranstaltung hat bereits genug Zuweisungen für diese Funktion.",
        ));
    }

    Ok(())
}
/// Ensures the availability is not already assigned to ANOTHER event whose
/// assignment time overlaps the requested span.
async fn availability_not_already_assigned(
    time: &(NaiveDateTime, NaiveDateTime),
    availability: &Availability,
    event: &Event,
    pool: &PgPool,
) -> Result<(), AsyncValidateError> {
    // All assignments of this availability, except those of the event that is
    // currently being validated.
    let list: Vec<Assignment> = Assignment::read_all_by_availability(pool, availability.id)
        .await?
        .into_iter()
        .filter(|a| a.event_id != event.id)
        .collect();

    // NOTE(review): these predicates catch an existing assignment that starts
    // or ends inside the requested span; an assignment fully ENCLOSING the
    // span (a.start < time.0 && a.end > time.1) would not be flagged —
    // confirm whether that case can occur in practice.
    let has_start_time_during_assignment = |a: &Assignment| a.start >= time.0 && a.start <= time.1;
    let has_end_time_during_assignment = |a: &Assignment| a.end >= time.0 && a.end <= time.1;

    if list
        .iter()
        .any(|a| has_start_time_during_assignment(a) || has_end_time_during_assignment(a))
    {
        return Err(AsyncValidateError::new(
            "Die Verfügbarkeit des Nutzers wurde bereits zu einer anderen Veranstaltung zugewiesen.",
        ));
    }

    Ok(())
}
// TODO: maybe merge with event changeset
/// Permission gate: only admins, or area managers responsible for the event's
/// area, may create assignments.
fn user_is_admin_or_area_manager_of_event_area(
    user: &User,
    event: &Event,
) -> Result<(), AsyncValidateError> {
    // The location unwrap only happens for area managers, mirroring the
    // short-circuit of the original `&&` expression.
    let allowed = match user.role {
        Role::Admin => true,
        Role::AreaManager => user.area_id == event.location.as_ref().unwrap().area_id,
        _ => false,
    };

    if allowed {
        Ok(())
    } else {
        Err(AsyncValidateError::new(
            "Du verfügst nicht über die Berechtigung, Zuweisungen zu Veranstaltungen vorzunehmen.",
        ))
    }
}

View File

@ -1,5 +1,5 @@
use chrono::{NaiveDate, NaiveDateTime}; use chrono::{NaiveDate, NaiveDateTime};
use sqlx::{query, PgPool}; use sqlx::{PgPool, query};
use super::{Area, AvailabilityChangeset, Result, Role, User, UserFunction}; use super::{Area, AvailabilityChangeset, Result, Role, User, UserFunction};
@ -341,6 +341,89 @@ impl Availability {
Ok(availabilities) Ok(availabilities)
} }
/// Reads all availabilities of `user_id` whose start AND end dates both fall
/// inside `date_range` (inclusive on both ends).
///
/// The `user` relation is not fetched; it is always `None` on the returned
/// values.
///
/// NOTE(review): the SQL literal's exact text (including whitespace) is hashed
/// into the offline sqlx metadata in `db/.sqlx` — reformatting it requires
/// re-running `cargo db prepare`.
pub async fn read_by_user_and_daterange(
    pool: &PgPool,
    user_id: i32,
    date_range: (&NaiveDate, &NaiveDate),
) -> Result<Vec<Availability>> {
    let records = query!(
        r##"
        SELECT
            availability.id,
            availability.userId,
            availability.startTimestamp,
            availability.endTimestamp,
            availability.comment
        FROM availability
        WHERE availability.userId = $1
        AND availability.starttimestamp::date >= $2
        AND availability.endtimestamp::date <= $3;
        "##,
        user_id,
        date_range.0,
        date_range.1
    )
    .fetch_all(pool)
    .await?;

    // Map DB rows onto the model; timestamps are stored as timestamptz and
    // converted to naive UTC here.
    let availabilities = records
        .iter()
        .map(|r| Availability {
            id: r.id,
            user_id: r.userid,
            user: None,
            start: r.starttimestamp.naive_utc(),
            end: r.endtimestamp.naive_utc(),
            comment: r.comment.clone(),
        })
        .collect();

    Ok(availabilities)
}
/// Finds an availability of `user` that exactly touches the given span:
/// one that ends precisely at `start` or begins precisely at `end`.
///
/// `availability_to_ignore` excludes a record (e.g. the one currently being
/// edited) from the search. Up to two adjacent availabilities may exist (one
/// before, one after); for now only the first is returned so the caller can
/// extend it.
///
/// NOTE: the SQL literal's exact text is hashed into the offline sqlx
/// metadata in `db/.sqlx` — do not reformat it without re-running prepare.
pub async fn find_adjacent_by_time_for_user(
    pool: &PgPool,
    start: &NaiveDateTime,
    end: &NaiveDateTime,
    user: i32,
    availability_to_ignore: Option<i32>,
) -> Result<Option<Availability>> {
    let records = query!(
        r##"
        SELECT
            availability.id,
            availability.userId,
            availability.startTimestamp,
            availability.endTimestamp,
            availability.comment
        FROM availability
        WHERE availability.userId = $1
        AND (availability.endtimestamp = $2
            OR availability.starttimestamp = $3)
        AND (availability.id <> $4 OR $4 IS NULL);
        "##,
        user,
        start.and_utc(),
        end.and_utc(),
        availability_to_ignore
    )
    .fetch_all(pool) // possible to find up to two availabilities (upper and lower), for now we only pick one and extend it
    .await?;

    // `map` instead of `and_then(|r| Some(..))` (clippy: bind_instead_of_map);
    // also fixes the `adjacent_avaialability` typo.
    let adjacent_availability = records.first().map(|r| Availability {
        id: r.id,
        user_id: r.userid,
        user: None,
        start: r.starttimestamp.naive_utc(),
        end: r.endtimestamp.naive_utc(),
        comment: r.comment.clone(),
    });

    Ok(adjacent_availability)
}
pub async fn update(pool: &PgPool, id: i32, changeset: AvailabilityChangeset) -> Result<()> { pub async fn update(pool: &PgPool, id: i32, changeset: AvailabilityChangeset) -> Result<()> {
query!( query!(
"UPDATE availability SET startTimestamp = $1, endTimestamp = $2, comment = $3 WHERE id = $4", "UPDATE availability SET startTimestamp = $1, endTimestamp = $2, comment = $3 WHERE id = $4",

View File

@ -0,0 +1,152 @@
use chrono::{Days, NaiveDateTime};
use sqlx::PgPool;
use super::Availability;
use crate::{
END_OF_DAY, START_OF_DAY,
models::Assignment,
validation::{AsyncValidate, AsyncValidateError, start_date_time_lies_before_end_date_time},
};
/// Candidate data for creating or updating a user's availability.
pub struct AvailabilityChangeset {
    // Requested (start, end) time span.
    pub time: (NaiveDateTime, NaiveDateTime),
    // Optional free-text comment attached to the availability.
    pub comment: Option<String>,
}
/// External state needed to validate an [`AvailabilityChangeset`].
pub struct AvailabilityContext<'a> {
    // Database handle for the async lookups performed during validation.
    pub pool: &'a PgPool,
    // User the availability belongs to.
    pub user_id: i32,
    // Id of the availability being edited, or `None` when creating a new one.
    pub availability: Option<i32>,
}
impl<'a> AsyncValidate<'a> for AvailabilityChangeset {
    type Context = AvailabilityContext<'a>;

    /// Validates the changeset against the user's existing availabilities.
    ///
    /// When `context.availability` is set (editing), that record is excluded
    /// from the overlap check, and it is additionally verified that shrinking
    /// the span does not cut off already-existing assignments.
    async fn validate_with_context(
        &self,
        context: &'a Self::Context,
    ) -> Result<(), AsyncValidateError> {
        // NOTE(review): only availabilities on the START date of the new span
        // are loaded — confirm spans crossing midnight are intended to be
        // checked against the first day only.
        let mut existing_availabilities =
            Availability::read_by_user_and_date(context.pool, context.user_id, &self.time.0.date())
                .await?;

        start_date_time_lies_before_end_date_time(&self.time.0, &self.time.1)?;

        if let Some(availability) = context.availability {
            // Editing: do not let the record collide with itself.
            existing_availabilities = existing_availabilities
                .into_iter()
                .filter(|a| a.id != availability)
                .collect();

            time_is_not_already_assigned(&self.time, availability, context.pool).await?;
        }

        if !existing_availabilities.is_empty() {
            time_is_not_already_made_available(&self.time, &existing_availabilities)?;
        }

        Ok(())
    }
}
/// Validates that the requested (start, end) span does not overlap — and is
/// not identical to — any of the user's existing availabilities.
fn time_is_not_already_made_available(
    (start, end): &(NaiveDateTime, NaiveDateTime),
    existing_availabilities: &Vec<Availability>,
) -> Result<(), AsyncValidateError> {
    let free_slots = find_free_date_time_slots(existing_availabilities);

    if free_slots.is_empty() {
        return Err(AsyncValidateError::new(
            "Verfügbarkeit kann nicht erstellt werden, da bereits alle Zeiträume verfügbar gemacht wurden.",
        ));
    }

    // Both endpoints must fall into some still-free slot.
    let free_block_found_for_start = free_slots.iter().any(|s| s.0 <= *start && s.1 >= *start);
    let free_block_found_for_end = free_slots.iter().any(|s| s.0 <= *end && s.1 >= *end);

    // BUGFIX: was `a.end == a.end` (always true), which rejected any changeset
    // merely sharing a start timestamp with an existing availability.
    let is_already_present_as_is = existing_availabilities
        .iter()
        .any(|a| a.start == *start && a.end == *end);

    if !free_block_found_for_start || !free_block_found_for_end || is_already_present_as_is {
        return Err(AsyncValidateError::new(
            "Verfügbarkeit kann nicht erstellt werden, da eine vorhandene Verfügbarkeit überschnitten würde.",
        ));
    }

    Ok(())
}
/// When shrinking an availability, every assignment already planned on it
/// must still fit into the new (start, end) span.
async fn time_is_not_already_assigned(
    (start, end): &(NaiveDateTime, NaiveDateTime),
    availability: i32,
    pool: &PgPool,
) -> Result<(), AsyncValidateError> {
    let existing_assignments = Assignment::read_all_by_availability(pool, availability).await?;

    for a in existing_assignments {
        // An assignment reaching outside the new span would be orphaned.
        if a.start < *start || a.end > *end {
            return Err(AsyncValidateError::new(
                "Verfügbarkeitszeit kann nicht verkleinert werden, da bereits eine Planung für diese Zeit existiert.",
            ));
        }
    }

    Ok(())
}
/// Computes the still-free time slots of a day, given the availabilities that
/// already exist for it.
///
/// The availabilities are sorted, exactly-adjacent spans are merged, and the
/// gaps between the merged spans — bounded by `START_OF_DAY` of the day and
/// `END_OF_DAY` of the following day — are returned.
pub fn find_free_date_time_slots(
    availabilities: &[Availability],
) -> Vec<(NaiveDateTime, NaiveDateTime)> {
    if availabilities.is_empty() {
        return Vec::new();
    }
    let mut spans: Vec<(NaiveDateTime, NaiveDateTime)> =
        availabilities.iter().map(|a| (a.start, a.end)).collect();
    spans.sort();
    // Merge spans that touch exactly (end of one == start of the next). A
    // single pass suffices because the spans are sorted ascending.
    let mut merged: Vec<(NaiveDateTime, NaiveDateTime)> = Vec::with_capacity(spans.len());
    for span in spans {
        match merged.last_mut() {
            Some(last) if last.1 == span.0 => last.1 = span.1,
            _ => merged.push(span),
        }
    }
    // Day boundaries: from START_OF_DAY of the first span's date up to
    // END_OF_DAY of the following day.
    let day = merged.first().unwrap().0.date();
    let lower_bound = day.and_time(START_OF_DAY);
    let upper_bound = day
        .checked_add_days(Days::new(1))
        .unwrap()
        .and_time(END_OF_DAY);
    // `merged` now holds the unified existing availabilities; the free slots
    // are their "inverse".
    let mut free_slots = Vec::new();
    let first = merged.first().unwrap();
    if first.0 != lower_bound {
        free_slots.push((lower_bound, first.0));
    }
    for pair in merged.windows(2) {
        free_slots.push((pair[0].1, pair[1].0));
    }
    let last = merged.last().unwrap();
    if last.1 != upper_bound {
        free_slots.push((last.1, upper_bound));
    }
    free_slots
}

View File

@ -1,7 +1,7 @@
use chrono::{NaiveDate, NaiveDateTime}; use chrono::{NaiveDate, NaiveDateTime};
use sqlx::{query, PgPool}; use sqlx::{PgPool, query};
use super::{event_changeset::EventChangeset, Clothing, Location, Result}; use super::{Clothing, EventChangeset, Location, Result};
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Event { pub struct Event {
@ -95,6 +95,73 @@ impl Event {
Ok(events) Ok(events)
} }
/// Reads all events whose start date lies inside `date_range` (inclusive on
/// both ends) and whose location belongs to the given area, with the
/// `location` and `clothing` relations populated (`location.area` stays
/// `None`).
///
/// NOTE(review): `date_range` is compared against `starttimestamp::date`, so
/// only the start day of an event decides membership — presumably intended;
/// confirm for events crossing midnight.
pub async fn read_all_by_daterange_and_area_including_location(
    pool: &PgPool,
    date_range: (&NaiveDate, &NaiveDate),
    area_id: i32,
) -> Result<Vec<Event>> {
    let records = query!(
        r#"
SELECT
event.id AS eventId,
event.startTimestamp,
event.endTimestamp,
event.name,
event.locationId,
event.voluntaryWachhabender,
event.voluntaryFuehrungsassistent,
event.amountOfPosten,
event.clothing,
event.canceled,
event.note,
location.id,
location.name AS locationName,
location.areaId AS locationAreaId,
clothing.id AS clothingId,
clothing.name AS clothingName
FROM event
JOIN location ON event.locationId = location.id
JOIN clothing ON event.clothing = clothing.id
WHERE starttimestamp::date >= $1
AND starttimestamp::date <= $2
AND location.areaId = $3;
"#,
        date_range.0,
        date_range.1,
        area_id
    )
    .fetch_all(pool)
    .await?;
    // Map the flat join rows onto Event with nested Location and Clothing.
    let events = records
        .into_iter()
        .map(|record| Event {
            id: record.eventid,
            // Timestamps are stored with timezone; the model keeps naive UTC.
            start: record.starttimestamp.naive_utc(),
            end: record.endtimestamp.naive_utc(),
            name: record.name.to_string(),
            location_id: record.locationid,
            location: Some(Location {
                id: record.locationid,
                name: record.locationname.to_string(),
                area_id: record.locationareaid,
                // The area itself is not joined here.
                area: None,
            }),
            voluntary_wachhabender: record.voluntarywachhabender,
            voluntary_fuehrungsassistent: record.voluntaryfuehrungsassistent,
            amount_of_posten: record.amountofposten,
            clothing: Clothing {
                id: record.clothingid,
                name: record.clothingname,
            },
            canceled: record.canceled,
            note: record.note,
        })
        .collect();
    Ok(events)
}
pub async fn read_by_id_including_location(pool: &PgPool, id: i32) -> Result<Option<Event>> { pub async fn read_by_id_including_location(pool: &PgPool, id: i32) -> Result<Option<Event>> {
let record = query!( let record = query!(
r#" r#"

View File

@ -0,0 +1,248 @@
use chrono::Days;
use chrono::NaiveDate;
use chrono::NaiveDateTime;
#[cfg(feature = "test-helpers")]
use fake::{Fake, Faker};
use sqlx::PgPool;
use crate::END_OF_DAY;
use crate::START_OF_DAY;
use crate::models::Assignment;
use crate::models::Availability;
use crate::models::Event;
use crate::models::Function;
use crate::models::Location;
use crate::models::Role;
use crate::models::User;
use crate::validation::AsyncValidate;
use crate::validation::AsyncValidateError;
use crate::validation::start_date_time_lies_before_end_date_time;
/// Input data for creating or editing an event, validated via `AsyncValidate`.
pub struct EventChangeset {
    /// Start and end of the event; start must lie before end.
    pub time: (NaiveDateTime, NaiveDateTime),
    pub name: String,
    /// Id of the location the event takes place at; must exist.
    pub location_id: i32,
    // presumably: whether the Wachhabender position is staffed voluntarily —
    // validation only forbids unsetting it while someone is assigned.
    pub voluntary_wachhabender: bool,
    // presumably: same semantics as voluntary_wachhabender, for the
    // Führungsassistent position.
    pub voluntary_fuehrungsassistent: bool,
    /// Requested number of Posten; validated to be between the number of
    /// already-planned Posten and 100.
    pub amount_of_posten: i16,
    /// Id of the clothing to wear at the event.
    pub clothing: i32,
    pub note: Option<String>,
}
/// Context required to run the async validation of an `EventChangeset`.
pub struct EventContext<'a> {
    /// Connection pool used for the database lookups during validation.
    pub pool: &'a PgPool,
    /// Id of the event being edited, or `None` when creating a new one.
    pub event: Option<i32>,
    /// User performing the operation; must be admin or the area manager of
    /// the event's location.
    pub user: &'a User,
}
impl<'a> AsyncValidate<'a> for EventChangeset {
    type Context = EventContext<'a>;

    /// Validates the changeset for creating (`context.event == None`) or
    /// editing (`context.event == Some(id)`) an event.
    async fn validate_with_context(
        &self,
        context: &'a Self::Context,
    ) -> Result<(), AsyncValidateError> {
        // The referenced location must exist before anything else is checked.
        let Some(location) = Location::read_by_id(context.pool, self.location_id).await? else {
            return Err(AsyncValidateError::new(
                "Der angegebene Veranstaltungsort existiert nicht.",
            ));
        };
        user_is_admin_or_area_manager_of_event_location(context.user, &location)?;
        start_date_time_lies_before_end_date_time(&self.time.0, &self.time.1)?;
        // Lower bound for amount_of_posten; raised below when editing an
        // event that already has Posten assigned.
        let mut minimum_amount_of_posten = 0_i16;
        if let Some(id) = context.event {
            // NOTE(review): unwrap assumes the event id in the context always
            // exists — a stale id would panic here.
            let event = Event::read_by_id_including_location(context.pool, id)
                .await?
                .unwrap();
            let assignments_for_event =
                Assignment::read_all_by_event(context.pool, event.id).await?;
            minimum_amount_of_posten = assignments_for_event
                .iter()
                .filter(|a| a.function == Function::Posten)
                .count() as i16;
            // The new time span must stay within the assigned people's
            // availabilities, and the event date itself may not change.
            time_can_be_extended_if_edit(&self.time, &event, &assignments_for_event, context.pool)
                .await?;
            date_unchanged_if_edit(&self.time, &event.start.date())?;
            can_unset_wachhabender(&self.voluntary_wachhabender, &assignments_for_event)?;
            can_unset_fuehrungsassistent(
                &self.voluntary_fuehrungsassistent,
                &assignments_for_event,
            )?;
            // The location may change, but only within the original area.
            if location.area_id != event.location.unwrap().area_id {
                return Err(AsyncValidateError::new(
                    "Veranstaltungsort kann nicht zu einem Ort außerhalb des initialen Bereichs geändert werden.",
                ));
            }
        }
        if !(minimum_amount_of_posten..=100).contains(&self.amount_of_posten) {
            return Err(AsyncValidateError::new(
                "Die Anzahl der Posten darf nicht kleiner als die Anzahl der bereits geplanten Posten und maximal 100 sein.",
            ));
        }
        Ok(())
    }
}
/// Permits the operation only for admins or for area managers responsible
/// for the location's area.
fn user_is_admin_or_area_manager_of_event_location(
    user: &User,
    location: &Location,
) -> Result<(), AsyncValidateError> {
    let is_admin = user.role == Role::Admin;
    let manages_area = user.role == Role::AreaManager && user.area_id == location.area_id;
    if is_admin || manages_area {
        Ok(())
    } else {
        Err(AsyncValidateError::new(
            "Du verfügst nicht über die Berechtigung, diese Veranstaltung zu erstellen bzw. zu bearbeiten.",
        ))
    }
}
/// Rejects the changeset when its start date differs from the date stored in
/// the database — the day of an event cannot be moved.
fn date_unchanged_if_edit(
    time: &(NaiveDateTime, NaiveDateTime),
    date_in_db: &NaiveDate,
) -> Result<(), AsyncValidateError> {
    if time.0.date() == *date_in_db {
        Ok(())
    } else {
        Err(AsyncValidateError::new("event date can't be changed"))
    }
}
/// When editing an event, checks that the new time span still lies inside the
/// time window common to all already-assigned people.
///
/// Starting from the widest possible window (the event day from START_OF_DAY
/// to END_OF_DAY of the following day), the window is narrowed per assignment
/// to the part of the person's availability usable for this event.
async fn time_can_be_extended_if_edit(
    time: &(NaiveDateTime, NaiveDateTime),
    event: &Event,
    assignments_for_event: &Vec<Assignment>,
    pool: &PgPool,
) -> Result<(), AsyncValidateError> {
    let start = event.start.date();
    let end = event.start.date().checked_add_days(Days::new(1)).unwrap();
    let mut common_time = (start.and_time(START_OF_DAY), end.and_time(END_OF_DAY));
    for assignment in assignments_for_event {
        // NOTE(review): unwrap assumes the referenced availability still
        // exists — a dangling availability_id would panic.
        let availability = Availability::read_by_id(pool, assignment.availability_id)
            .await?
            .unwrap();
        let all_assignments =
            Assignment::read_all_by_availability(pool, assignment.availability_id).await?;
        if all_assignments.len() == 1 {
            // Sole assignment: the whole availability is usable for this event.
            if availability.start > common_time.0 {
                common_time.0 = availability.start;
            }
            if availability.end < common_time.1 {
                common_time.1 = availability.end;
            }
        } else {
            // Availability shared with other events: carve the other events'
            // assignments out of it and keep the slot belonging to this one.
            let mut slots = vec![(availability.start, availability.end)];
            for a in all_assignments
                .iter()
                .filter(|x| x.event_id != assignment.event_id)
            {
                // NOTE(review): this predicate selects slots CONTAINED IN the
                // other assignment (s ⊆ a); if the intent was "slot containing
                // the assignment" the comparisons would be reversed. `fit` is
                // empty (and unwrap panics) when no slot matches — confirm
                // against real data before changing.
                let (fit, rest) = slots
                    .into_iter()
                    .partition(|s| s.0 >= a.start && s.1 <= a.end);
                slots = rest;
                let fit = fit.first().unwrap();
                if fit.0 != a.start {
                    slots.push((fit.0, a.start));
                }
                if fit.1 != a.end {
                    slots.push((a.end, fit.1));
                }
            }
            // NOTE(review): same containment direction question as above.
            let slot = slots
                .into_iter()
                .find(|s| s.0 >= assignment.start && s.1 <= assignment.end)
                .unwrap();
            if slot.0 > common_time.0 {
                common_time.0 = slot.0;
            }
            if slot.1 < common_time.1 {
                common_time.1 = slot.1;
            }
        }
    }
    let old_start_time = common_time.0;
    let new_start_time = time.0;
    let old_end_time = common_time.1;
    let new_end_time = time.1;
    if new_start_time < old_start_time {
        return Err(AsyncValidateError::new(
            "starttime lies outside of available time for assigned people",
        ));
    }
    if new_end_time > old_end_time {
        // BUG FIX: error message typo "ouside" -> "outside".
        return Err(AsyncValidateError::new(
            "endtime lies outside of available time for assigned people",
        ));
    }
    Ok(())
}
/// Forbids declaring the Führungsassistent as not required while a person is
/// already assigned to that function.
fn can_unset_fuehrungsassistent(
    fuehrungsassistent_required: &bool,
    assignments_for_event: &Vec<Assignment>,
) -> Result<(), AsyncValidateError> {
    let someone_assigned = assignments_for_event
        .iter()
        .any(|a| a.function == Function::Fuehrungsassistent);
    if *fuehrungsassistent_required || !someone_assigned {
        return Ok(());
    }
    Err(AsyncValidateError::new(
        "fuehrungsassistent can't be set to not by ff, because a person is already assigned",
    ))
}
/// Forbids declaring the Wachhabender as not required while a person is
/// already assigned to that function.
fn can_unset_wachhabender(
    voluntary_wachhabender: &bool,
    assignments_for_event: &Vec<Assignment>,
) -> Result<(), AsyncValidateError> {
    let someone_assigned = assignments_for_event
        .iter()
        .any(|a| a.function == Function::Wachhabender);
    if *voluntary_wachhabender || !someone_assigned {
        return Ok(());
    }
    Err(AsyncValidateError::new(
        "wachhabender can't be set to not by ff, because a person is already assigned",
    ))
}
#[cfg(feature = "test-helpers")]
impl EventChangeset {
    /// Builds a changeset for tests: the given time span, a faked name, and
    /// fixed values for all remaining fields.
    pub fn create_for_test(start: NaiveDateTime, end: NaiveDateTime) -> EventChangeset {
        // Return the literal directly instead of binding it first
        // (clippy::let_and_return).
        EventChangeset {
            time: (start, end),
            name: Faker.fake(),
            location_id: 1,
            voluntary_wachhabender: true,
            voluntary_fuehrungsassistent: true,
            amount_of_posten: 5,
            clothing: 1,
            note: None,
        }
    }
}

View File

@ -1,12 +1,11 @@
use chrono::{NaiveDate, NaiveDateTime}; use chrono::{NaiveDate, NaiveDateTime};
use sqlx::{ use sqlx::{
PgPool,
postgres::{PgHasArrayType, PgTypeInfo}, postgres::{PgHasArrayType, PgTypeInfo},
query, PgPool, query,
}; };
use crate::utils::ApplicationError; use super::{Function, Result};
use super::Function;
pub struct ExportEventRow { pub struct ExportEventRow {
pub start_timestamp: NaiveDateTime, pub start_timestamp: NaiveDateTime,
@ -38,7 +37,7 @@ impl ExportEventRow {
pool: &PgPool, pool: &PgPool,
time: (NaiveDate, NaiveDate), time: (NaiveDate, NaiveDate),
area: i32, area: i32,
) -> Result<Vec<ExportEventRow>, ApplicationError> { ) -> Result<Vec<ExportEventRow>> {
let rows = query!( let rows = query!(
"select "select
event.starttimestamp, event.starttimestamp,

View File

@ -1,8 +1,9 @@
use std::fmt::Display; use std::fmt::Display;
use crate::utils::ApplicationError;
use serde::Serialize; use serde::Serialize;
use crate::UnsupportedEnumValue;
#[derive(sqlx::Type, Debug, Clone, Copy, PartialEq, Eq, Serialize, PartialOrd, Ord)] #[derive(sqlx::Type, Debug, Clone, Copy, PartialEq, Eq, Serialize, PartialOrd, Ord)]
#[sqlx(type_name = "function", rename_all = "lowercase")] #[sqlx(type_name = "function", rename_all = "lowercase")]
pub enum Function { pub enum Function {
@ -22,16 +23,16 @@ impl Display for Function {
} }
impl TryFrom<u8> for Function { impl TryFrom<u8> for Function {
type Error = ApplicationError; type Error = UnsupportedEnumValue;
fn try_from(value: u8) -> Result<Self, Self::Error> { fn try_from(value: u8) -> Result<Self, Self::Error> {
match value { match value {
1 => Ok(Function::Posten), 1 => Ok(Function::Posten),
5 => Ok(Function::Fuehrungsassistent), 5 => Ok(Function::Fuehrungsassistent),
10 => Ok(Function::Wachhabender), 10 => Ok(Function::Wachhabender),
_ => Err(ApplicationError::UnsupportedEnumValue { _ => Err(UnsupportedEnumValue {
value: value.to_string(), value,
enum_name: String::from("Function"), enum_name: "Function",
}), }),
} }
} }

View File

@ -1,8 +1,6 @@
use sqlx::{query, PgPool}; use sqlx::{PgPool, query};
use super::Area; use super::{Area, Result};
use super::Result;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Location { pub struct Location {

Some files were not shown because too many files have changed in this diff Show More