Cleanup db/schedule namespace
parent 09c50411d1
commit effd3f3b18
8 changed files with 155 additions and 209 deletions
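
The refactor below folds the free functions from db::schedules into methods on the Schedule struct (which itself moves out of db::models into db::schedules). As a quick orientation, here is a minimal sketch of how a call site changes; the example function and tag value are illustrative only, while the method signatures are taken from the hunks below:

use sqlx::pool::PoolConnection;
use sqlx::Sqlite;

use crate::db::errors::DatabaseError;
use crate::db::schedules::Schedule;
use crate::types::EmgauwaUid;

// Hypothetical caller, not part of this commit; it only demonstrates the new call sites.
async fn example(conn: &mut PoolConnection<Sqlite>, uid: &EmgauwaUid) -> Result<(), DatabaseError> {
    // before: get_schedule_by_uid(conn, uid) and set_schedule_tags(conn, &schedule, &tags)
    let schedule = Schedule::get_by_uid(conn, uid).await?;
    schedule.set_tags(conn, &["example-tag".to_string()]).await?;
    let _tags: Vec<String> = schedule.get_tags(conn).await?;
    Ok(())
}
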
@@ -5,7 +5,7 @@ use sqlx::{Pool, Sqlite};
 
 use crate::db::errors::DatabaseError;
 use crate::db::model_utils::Period;
-use crate::db::models::{Periods, Schedule};
+use crate::db::schedules::{Periods, Schedule};
 use crate::types::EmgauwaUid;
 
 pub mod errors;
@@ -29,7 +29,7 @@ async fn init_schedule(
     periods: Periods,
 ) -> Result<(), DatabaseError> {
     trace!("Initializing schedule {:?}", name);
-    match schedules::get_schedule_by_uid(&mut pool.acquire().await.unwrap(), uid).await {
+    match Schedule::get_by_uid(&mut pool.acquire().await.unwrap(), uid).await {
         Ok(_) => Ok(()),
         Err(err) => match err {
             DatabaseError::NotFound => {
@@ -1,3 +1,4 @@
+use crate::db::schedules::Periods;
 use chrono::{NaiveTime, Timelike};
 use serde::{Deserialize, Serialize};
 use sqlx::database::HasArguments;
@@ -6,8 +7,6 @@ use sqlx::error::BoxDynError;
 use sqlx::sqlite::{SqliteTypeInfo, SqliteValueRef};
 use sqlx::{Decode, Encode, Sqlite, Type};
 
-use crate::db::models::Periods;
-
 #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
 pub struct Period {
     #[serde(with = "period_format")]
@@ -52,54 +51,6 @@ impl Period {
     }
 }
 
-//impl ToSql<Binary, Sqlite> for Periods
-//where
-// Vec<u8>: ToSql<Binary, Sqlite>,
-//{
-// fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Sqlite>) -> serialize::Result {
-// let periods_u8: Vec<u8> = self
-// .0
-// .iter()
-// .flat_map(|period| {
-// let vec = vec![
-// period.start.hour() as u8,
-// period.start.minute() as u8,
-// period.end.hour() as u8,
-// period.end.minute() as u8,
-// ];
-// vec
-// })
-// .collect();
-//
-// out.set_value(periods_u8);
-//
-// Ok(IsNull::No)
-// }
-//}
-//
-//impl<DB> FromSql<Binary, DB> for Periods
-//where
-// DB: diesel::backend::Backend,
-// Vec<u8>: FromSql<Binary, DB>,
-//{
-// fn from_sql(bytes: DB::RawValue<'_>) -> deserialize::Result<Self> {
-// let blob: Vec<u8> = Vec::from_sql(bytes).unwrap();
-//
-// let mut vec = Vec::new();
-// for i in (3..blob.len()).step_by(4) {
-// let start_val_h: u32 = blob[i - 3] as u32;
-// let start_val_m: u32 = blob[i - 2] as u32;
-// let end_val_h: u32 = blob[i - 1] as u32;
-// let end_val_m: u32 = blob[i] as u32;
-// vec.push(Period {
-// start: NaiveTime::from_hms_opt(start_val_h, start_val_m, 0).unwrap(),
-// end: NaiveTime::from_hms_opt(end_val_h, end_val_m, 0).unwrap(),
-// });
-// }
-// Ok(Periods(vec))
-// }
-//}
-
 impl Type<Sqlite> for Periods {
     fn type_info() -> SqliteTypeInfo {
         <&[u8] as Type<Sqlite>>::type_info()
@@ -1,7 +1,4 @@
-use serde::{Deserialize, Serialize};
+use serde::Serialize;
 
-use crate::db::model_utils::Period;
-use crate::types::EmgauwaUid;
-
 #[derive(Debug, Serialize)]
 pub struct Relay {
@@ -10,19 +7,6 @@ pub struct Relay {
     // TODO
 }
 
-#[derive(Debug, Serialize, Clone)]
-pub struct Schedule {
-    #[serde(skip)]
-    pub id: i64,
-    #[serde(rename(serialize = "id"))]
-    pub uid: EmgauwaUid,
-    pub name: String,
-    pub periods: Periods,
-}
-
-#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
-pub struct Periods(pub Vec<Period>);
-
 #[derive(Debug, Serialize, Clone)]
 pub struct Tag {
     pub id: i64,
@@ -1,3 +1,4 @@
+use serde_derive::{Deserialize, Serialize};
 use std::borrow::Borrow;
 use std::ops::DerefMut;
 
@@ -5,134 +6,148 @@ use sqlx::pool::PoolConnection;
 use sqlx::Sqlite;
 
 use crate::db::errors::DatabaseError;
+use crate::db::model_utils::Period;
 use crate::db::models::*;
 use crate::db::tag::{create_junction_tag_schedule, create_tag};
 use crate::types::EmgauwaUid;
 
-pub async fn get_schedule_tags(
-    conn: &mut PoolConnection<Sqlite>,
-    schedule: &Schedule,
-) -> Result<Vec<String>, DatabaseError> {
-    Ok(sqlx::query_scalar!("SELECT tag FROM tags INNER JOIN junction_tag ON junction_tag.tag_id = tags.id WHERE junction_tag.schedule_id = ?", schedule.id)
-        .fetch_all(conn.deref_mut())
-        .await?)
+#[derive(Debug, Serialize, Clone)]
+pub struct Schedule {
+    #[serde(skip)]
+    pub id: i64,
+    #[serde(rename(serialize = "id"))]
+    pub uid: EmgauwaUid,
+    pub name: String,
+    pub periods: Periods,
 }
 
-pub async fn get_schedules(
-    conn: &mut PoolConnection<Sqlite>,
-) -> Result<Vec<Schedule>, DatabaseError> {
-    Ok(sqlx::query_as!(Schedule, "SELECT * FROM schedules")
-        .fetch_all(conn.deref_mut())
-        .await?)
-}
+#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
+pub struct Periods(pub Vec<Period>);
 
-pub async fn get_schedule_by_uid(
-    conn: &mut PoolConnection<Sqlite>,
-    filter_uid: &EmgauwaUid,
-) -> Result<Schedule, DatabaseError> {
-    sqlx::query_as!(
-        Schedule,
-        "SELECT * FROM schedules WHERE uid = ?",
-        filter_uid
-    )
-    .fetch_optional(conn.deref_mut())
-    .await
-    .map(|s| s.ok_or(DatabaseError::NotFound))?
-}
+impl Schedule {
+    pub async fn get_all(
+        conn: &mut PoolConnection<Sqlite>,
+    ) -> Result<Vec<Schedule>, DatabaseError> {
+        Ok(sqlx::query_as!(Schedule, "SELECT * FROM schedules")
+            .fetch_all(conn.deref_mut())
+            .await?)
+    }
 
-pub async fn get_schedules_by_tag(
-    conn: &mut PoolConnection<Sqlite>,
-    tag: &Tag,
-) -> Result<Vec<Schedule>, DatabaseError> {
-    Ok(sqlx::query_as!(Schedule, "SELECT schedule.* FROM schedules AS schedule INNER JOIN junction_tag ON junction_tag.schedule_id = schedule.id WHERE junction_tag.tag_id = ?", tag.id)
-        .fetch_all(conn.deref_mut())
-        .await?)
-}
-
-pub async fn delete_schedule_by_uid(
-    conn: &mut PoolConnection<Sqlite>,
-    filter_uid: EmgauwaUid,
-) -> Result<(), DatabaseError> {
-    let filter_uid = match filter_uid {
-        EmgauwaUid::Off => Err(DatabaseError::Protected),
-        EmgauwaUid::On => Err(DatabaseError::Protected),
-        EmgauwaUid::Any(_) => Ok(filter_uid),
-    }?;
-
-    sqlx::query!("DELETE FROM schedules WHERE uid = ?", filter_uid)
-        .execute(conn.deref_mut())
-        .await
-        .map(|res| match res.rows_affected() {
-            0 => Err(DatabaseError::DeleteError),
-            _ => Ok(()),
-        })?
-}
+    pub async fn get_by_uid(
+        conn: &mut PoolConnection<Sqlite>,
+        filter_uid: &EmgauwaUid,
+    ) -> Result<Schedule, DatabaseError> {
+        sqlx::query_as!(
+            Schedule,
+            "SELECT * FROM schedules WHERE uid = ?",
+            filter_uid
+        )
+        .fetch_optional(conn.deref_mut())
+        .await
+        .map(|s| s.ok_or(DatabaseError::NotFound))?
+    }
 
-pub async fn create_schedule(
-    conn: &mut PoolConnection<Sqlite>,
-    new_name: &str,
-    new_periods: &Periods,
-) -> Result<Schedule, DatabaseError> {
-    let uid = EmgauwaUid::default();
-    sqlx::query_as!(
-        Schedule,
-        "INSERT INTO schedules (uid, name, periods) VALUES (?, ?, ?) RETURNING *",
-        uid,
-        new_name,
-        new_periods,
-    )
-    .fetch_optional(conn.deref_mut())
-    .await?
-    .ok_or(DatabaseError::InsertGetError)
-}
+    pub async fn get_by_tag(
+        conn: &mut PoolConnection<Sqlite>,
+        tag: &Tag,
+    ) -> Result<Vec<Schedule>, DatabaseError> {
+        Ok(sqlx::query_as!(Schedule, "SELECT schedule.* FROM schedules AS schedule INNER JOIN junction_tag ON junction_tag.schedule_id = schedule.id WHERE junction_tag.tag_id = ?", tag.id)
+            .fetch_all(conn.deref_mut())
+            .await?)
+    }
 
-pub async fn update_schedule(
-    conn: &mut PoolConnection<Sqlite>,
-    schedule: &Schedule,
-    new_name: &str,
-    new_periods: &Periods,
-) -> Result<Schedule, DatabaseError> {
-    // overwrite periods on protected schedules
-    let new_periods = match schedule.uid {
-        EmgauwaUid::Off | EmgauwaUid::On => schedule.periods.borrow(),
-        EmgauwaUid::Any(_) => new_periods,
-    };
+    pub async fn delete_by_uid(
+        conn: &mut PoolConnection<Sqlite>,
+        filter_uid: EmgauwaUid,
+    ) -> Result<(), DatabaseError> {
+        let filter_uid = match filter_uid {
+            EmgauwaUid::Off => Err(DatabaseError::Protected),
+            EmgauwaUid::On => Err(DatabaseError::Protected),
+            EmgauwaUid::Any(_) => Ok(filter_uid),
+        }?;
+
+        sqlx::query!("DELETE FROM schedules WHERE uid = ?", filter_uid)
+            .execute(conn.deref_mut())
+            .await
+            .map(|res| match res.rows_affected() {
+                0 => Err(DatabaseError::DeleteError),
+                _ => Ok(()),
+            })?
+    }
 
-    sqlx::query!(
-        "UPDATE schedules SET name = ?, periods = ? WHERE id = ?",
-        new_name,
-        new_periods,
-        schedule.id,
-    )
-    .execute(conn.deref_mut())
-    .await?;
+    pub async fn create(
+        conn: &mut PoolConnection<Sqlite>,
+        new_name: &str,
+        new_periods: &Periods,
+    ) -> Result<Schedule, DatabaseError> {
+        let uid = EmgauwaUid::default();
+        sqlx::query_as!(
+            Schedule,
+            "INSERT INTO schedules (uid, name, periods) VALUES (?, ?, ?) RETURNING *",
+            uid,
+            new_name,
+            new_periods,
+        )
+        .fetch_optional(conn.deref_mut())
+        .await?
+        .ok_or(DatabaseError::InsertGetError)
+    }
 
-    get_schedule_by_uid(conn, &schedule.uid).await
-}
+    pub async fn update(
+        &self,
+        conn: &mut PoolConnection<Sqlite>,
+        new_name: &str,
+        new_periods: &Periods,
+    ) -> Result<Schedule, DatabaseError> {
+        // overwrite periods on protected schedules
+        let new_periods = match self.uid {
+            EmgauwaUid::Off | EmgauwaUid::On => self.periods.borrow(),
+            EmgauwaUid::Any(_) => new_periods,
+        };
 
-pub async fn set_schedule_tags(
-    conn: &mut PoolConnection<Sqlite>,
-    schedule: &Schedule,
-    new_tags: &[String],
-) -> Result<(), DatabaseError> {
-    sqlx::query!(
-        "DELETE FROM junction_tag WHERE schedule_id = ?",
-        schedule.id
-    )
-    .execute(conn.deref_mut())
-    .await?;
-
-    for new_tag in new_tags {
-        let tag: Option<Tag> = sqlx::query_as!(Tag, "SELECT * FROM tags WHERE tag = ?", new_tag)
-            .fetch_optional(conn.deref_mut())
-            .await?;
-
-        let tag = match tag {
-            Some(id) => id,
-            None => create_tag(conn, new_tag).await?,
-        };
-
-        create_junction_tag_schedule(conn, tag, schedule).await?;
-    }
-    Ok(())
+        sqlx::query!(
+            "UPDATE schedules SET name = ?, periods = ? WHERE id = ?",
+            new_name,
+            new_periods,
+            self.id,
+        )
+        .execute(conn.deref_mut())
+        .await?;
+
+        Schedule::get_by_uid(conn, &self.uid).await
+    }
+
+    pub async fn get_tags(
+        &self,
+        conn: &mut PoolConnection<Sqlite>,
+    ) -> Result<Vec<String>, DatabaseError> {
+        Ok(sqlx::query_scalar!("SELECT tag FROM tags INNER JOIN junction_tag ON junction_tag.tag_id = tags.id WHERE junction_tag.schedule_id = ?", self.id)
+            .fetch_all(conn.deref_mut())
+            .await?)
+    }
+
+    pub async fn set_tags(
+        &self,
+        conn: &mut PoolConnection<Sqlite>,
+        new_tags: &[String],
+    ) -> Result<(), DatabaseError> {
+        sqlx::query!("DELETE FROM junction_tag WHERE schedule_id = ?", self.id)
+            .execute(conn.deref_mut())
+            .await?;
+
+        for new_tag in new_tags {
+            let tag: Option<Tag> =
+                sqlx::query_as!(Tag, "SELECT * FROM tags WHERE tag = ?", new_tag)
+                    .fetch_optional(conn.deref_mut())
+                    .await?;
+
+            let tag = match tag {
+                Some(id) => id,
+                None => create_tag(conn, new_tag).await?,
+            };
+
+            create_junction_tag_schedule(conn, tag, self).await?;
+        }
+        Ok(())
+    }
 }
@@ -5,6 +5,7 @@ use sqlx::Sqlite;
 
 use crate::db::errors::DatabaseError;
 use crate::db::models::*;
+use crate::db::schedules::Schedule;
 
 pub async fn create_tag(
     conn: &mut PoolConnection<Sqlite>,
@@ -7,7 +7,6 @@ use sqlx::pool::PoolConnection;
 use sqlx::{Pool, Sqlite};
 
 use crate::db::errors::DatabaseError;
-use crate::db::models::{Periods, Schedule};
 use crate::db::schedules::*;
 use crate::db::tag::get_tag;
 use crate::handlers::errors::ApiError;
@@ -26,7 +25,7 @@ pub struct RequestSchedule {
 pub async fn index(pool: web::Data<Pool<Sqlite>>) -> Result<HttpResponse, ApiError> {
     let mut pool_conn = pool.acquire().await?;
 
-    let schedules = get_schedules(&mut pool_conn).await?;
+    let schedules = Schedule::get_all(&mut pool_conn).await?;
 
     let mut return_schedules: Vec<ReturnSchedule> =
         schedules.iter().map(ReturnSchedule::from).collect();
@@ -47,7 +46,7 @@ pub async fn tagged(
     let (tag,) = path.into_inner();
     let tag_db = get_tag(&mut pool_conn, &tag).await?;
 
-    let schedules = get_schedules_by_tag(&mut pool_conn, &tag_db).await?;
+    let schedules = Schedule::get_by_tag(&mut pool_conn, &tag_db).await?;
 
     let mut return_schedules: Vec<ReturnSchedule> =
         schedules.iter().map(ReturnSchedule::from).collect();
@@ -67,7 +66,7 @@ pub async fn show(
     let (schedule_uid,) = path.into_inner();
     let emgauwa_uid = EmgauwaUid::try_from(schedule_uid.as_str()).or(Err(ApiError::BadUid))?;
 
-    let schedule = get_schedule_by_uid(&mut pool_conn, &emgauwa_uid).await?;
+    let schedule = Schedule::get_by_uid(&mut pool_conn, &emgauwa_uid).await?;
 
     let mut return_schedule = ReturnSchedule::from(schedule);
     return_schedule.load_tags(&mut pool_conn);
@@ -81,9 +80,11 @@ pub async fn add(
 ) -> Result<HttpResponse, ApiError> {
     let mut pool_conn = pool.acquire().await?;
 
-    let new_schedule = create_schedule(&mut pool_conn, &data.name, &data.periods).await?;
+    let new_schedule = Schedule::create(&mut pool_conn, &data.name, &data.periods).await?;
 
-    set_schedule_tags(&mut pool_conn, &new_schedule, data.tags.as_slice()).await?;
+    new_schedule
+        .set_tags(&mut pool_conn, data.tags.as_slice())
+        .await?;
 
     let mut return_schedule = ReturnSchedule::from(new_schedule);
     return_schedule.load_tags(&mut pool_conn);
@@ -95,9 +96,11 @@ async fn add_list_single(
     request_schedule: &RequestSchedule,
 ) -> Result<Schedule, DatabaseError> {
     let new_schedule =
-        create_schedule(conn, &request_schedule.name, &request_schedule.periods).await?;
+        Schedule::create(conn, &request_schedule.name, &request_schedule.periods).await?;
 
-    set_schedule_tags(conn, &new_schedule, request_schedule.tags.as_slice()).await?;
+    new_schedule
+        .set_tags(conn, request_schedule.tags.as_slice())
+        .await?;
 
     Ok(new_schedule)
 }
@@ -150,17 +153,15 @@ pub async fn update(
     let (schedule_uid,) = path.into_inner();
     let emgauwa_uid = EmgauwaUid::try_from(schedule_uid.as_str()).or(Err(ApiError::BadUid))?;
 
-    let schedule = get_schedule_by_uid(&mut pool_conn, &emgauwa_uid).await?;
+    let schedule = Schedule::get_by_uid(&mut pool_conn, &emgauwa_uid).await?;
 
-    let schedule = update_schedule(
-        &mut pool_conn,
-        &schedule,
-        data.name.as_str(),
-        data.periods.borrow(),
-    )
-    .await?;
+    let schedule = schedule
+        .update(&mut pool_conn, data.name.as_str(), data.periods.borrow())
+        .await?;
 
-    set_schedule_tags(&mut pool_conn, &schedule, data.tags.as_slice()).await?;
+    schedule
+        .set_tags(&mut pool_conn, data.tags.as_slice())
+        .await?;
 
     let mut return_schedule = ReturnSchedule::from(schedule);
     return_schedule.load_tags(&mut pool_conn);
@@ -181,7 +182,7 @@ pub async fn delete(
         EmgauwaUid::Off => Err(ApiError::ProtectedSchedule),
         EmgauwaUid::On => Err(ApiError::ProtectedSchedule),
        EmgauwaUid::Any(_) => {
-            delete_schedule_by_uid(&mut pool_conn, emgauwa_uid).await?;
+            Schedule::delete_by_uid(&mut pool_conn, emgauwa_uid).await?;
             Ok(HttpResponse::Ok().json("schedule got deleted"))
         }
     }
@@ -1,11 +1,9 @@
+use crate::db::schedules::Schedule;
 use futures::executor;
 use serde::Serialize;
 use sqlx::pool::PoolConnection;
 use sqlx::Sqlite;
 
-use crate::db::models::Schedule;
-use crate::db::schedules::get_schedule_tags;
-
 #[derive(Debug, Serialize)]
 pub struct ReturnSchedule {
     #[serde(flatten)]
@@ -15,7 +13,7 @@ pub struct ReturnSchedule {
 
 impl ReturnSchedule {
     pub fn load_tags(&mut self, conn: &mut PoolConnection<Sqlite>) {
-        self.tags = executor::block_on(get_schedule_tags(conn, &self.schedule)).unwrap();
+        self.tags = executor::block_on(self.schedule.get_tags(conn)).unwrap();
     }
 }
 
@@ -136,10 +136,6 @@ impl From<&[u8]> for EmgauwaUid {
 
 impl From<Vec<u8>> for EmgauwaUid {
     fn from(value: Vec<u8>) -> Self {
-        match value.as_slice() {
-            [EmgauwaUid::OFF_U8] => EmgauwaUid::Off,
-            [EmgauwaUid::ON_U8] => EmgauwaUid::On,
-            value_bytes => EmgauwaUid::Any(Uuid::from_slice(value_bytes).unwrap()),
-        }
+        EmgauwaUid::from(value.as_slice())
     }
 }
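
The last hunk drops the duplicated byte-pattern match from From<Vec<u8>> and delegates to the existing From<&[u8]> impl. Only the header of that slice impl is visible in the hunk context above; the sketch below assumes it carries the same match that was deleted here, so take the first impl body as an assumption rather than code from this commit:

// Assumed shape of the existing slice impl; only its signature appears in the diff.
impl From<&[u8]> for EmgauwaUid {
    fn from(value: &[u8]) -> Self {
        match value {
            [EmgauwaUid::OFF_U8] => EmgauwaUid::Off,
            [EmgauwaUid::ON_U8] => EmgauwaUid::On,
            value_bytes => EmgauwaUid::Any(Uuid::from_slice(value_bytes).unwrap()),
        }
    }
}

// After this commit, the Vec<u8> conversion simply reuses it.
impl From<Vec<u8>> for EmgauwaUid {
    fn from(value: Vec<u8>) -> Self {
        EmgauwaUid::from(value.as_slice())
    }
}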