Add AppState to Controller and split up models

Tobias Reisinger 2023-12-07 01:32:20 +01:00
parent 8dc9072fe8
commit 83c1f033d5
Signed by: serguzim
GPG key ID: 13AD60C237A28DFE
11 changed files with 260 additions and 150 deletions

Cargo.lock (generated): binary file not shown.


@@ -3,7 +3,7 @@ build:
cargo build
sqlx-prepare:
rm ./emgauwa-dev.sqlite || true
rm -f ./emgauwa-dev.sqlite
cargo sqlx database create
cargo sqlx migrate run
@@ -14,9 +14,7 @@ build-rpi:
cross build --target arm-unknown-linux-gnueabihf
clean-db:
rm ./emgauwa-dev.sqlite || true
rm ./emgauwa-core.sqlite || true
rm ./emgauwa-controller.sqlite || true
rm ./emgauwa-*.sqlite || true
$(MAKE) sqlx-prepare
format:


@@ -7,6 +7,8 @@ authors = ["Tobias Reisinger <tobias@msrg.cc>"]
[dependencies]
emgauwa-lib = { path = "../emgauwa-lib" }
actix = "0.13"
tokio = { version = "1.34", features = ["io-std", "macros", "rt-multi-thread"] }
tokio-tungstenite = "0.20"
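With actix added as a dependency, the controller's entry point further down in this diff switches from #[tokio::main] to #[actix::main]. A minimal sketch of such an entry point, not the project's actual main, assuming only the new actix dependency:

// Minimal sketch (not the project's actual main): #[actix::main] still runs
// on a Tokio runtime, but also sets up the actix System that Actor::start()
// needs, which is why the AppState actor below can be started with .start().
#[actix::main]
async fn main() -> Result<(), std::io::Error> {
    // actors such as AppState would be started here with .start()
    Ok(())
}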


@@ -0,0 +1,48 @@
use actix::{Actor, Context, Handler, Message};
use emgauwa_lib::errors::EmgauwaError;
use emgauwa_lib::models::Controller;
use futures::executor::block_on;
use sqlx::{Pool, Sqlite};
#[derive(Message)]
#[rtype(result = "Result<(), EmgauwaError>")]
pub struct Reload {}
#[derive(Message)]
#[rtype(result = "Controller")]
pub struct GetThis {}
pub struct AppState {
pub pool: Pool<Sqlite>,
pub this: Controller,
}
impl AppState {
pub fn new(pool: Pool<Sqlite>, this: Controller) -> AppState {
AppState { pool, this }
}
}
impl Actor for AppState {
type Context = Context<Self>;
}
impl Handler<Reload> for AppState {
type Result = Result<(), EmgauwaError>;
fn handle(&mut self, _msg: Reload, _ctx: &mut Self::Context) -> Self::Result {
let mut pool_conn = block_on(self.pool.acquire())?;
self.this.reload(&mut pool_conn)?;
Ok(())
}
}
impl Handler<GetThis> for AppState {
type Result = Controller;
fn handle(&mut self, _msg: GetThis, _ctx: &mut Self::Context) -> Self::Result {
self.this.clone()
}
}
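The AppState actor owns the connection pool and a cached Controller and is driven purely through the Reload and GetThis messages. A small usage sketch, assuming a Pool<Sqlite> and a freshly loaded Controller are already available; the example function below is hypothetical and only mirrors the real call sites further down in this diff:

use actix::{Actor, Addr};
use emgauwa_lib::errors::EmgauwaError;
use emgauwa_lib::models::Controller;
use sqlx::{Pool, Sqlite};

use crate::app_state::{AppState, GetThis, Reload};

// Hypothetical helper, for illustration only.
async fn example(pool: Pool<Sqlite>, this: Controller) -> Result<(), EmgauwaError> {
    // Start the actor; the resulting Addr<AppState> can be cloned and shared.
    let app_state: Addr<AppState> = AppState::new(pool, this).start();

    // GetThis replies with a clone of the cached Controller (rtype = "Controller").
    let _current: Controller = app_state
        .send(GetThis {})
        .await
        .map_err(EmgauwaError::from)?;

    // Reload makes the actor re-read its Controller from the database.
    // The first `?` handles the mailbox error, the second the handler's result.
    app_state.send(Reload {}).await??;

    Ok(())
}

Because the Reload handler runs synchronously inside the actor, the send future only resolves after the reload has finished, so a subsequent GetThis returns the refreshed state.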


@@ -1,21 +1,24 @@
use actix::Actor;
use emgauwa_lib::constants::WEBSOCKET_RETRY_TIMEOUT;
use emgauwa_lib::db;
use emgauwa_lib::db::{DbController, DbJunctionRelaySchedule, DbRelay, DbSchedule};
use emgauwa_lib::errors::EmgauwaError;
use emgauwa_lib::models::{Controller, FromDbModel};
use emgauwa_lib::types::ControllerUid;
use emgauwa_lib::{db, utils};
use emgauwa_lib::utils::init_logging;
use sqlx::pool::PoolConnection;
use sqlx::Sqlite;
use tokio::time;
use utils::init_logging;
use crate::relay_loop::run_relay_loop;
use crate::settings::Settings;
use crate::ws::run_websocket;
mod app_state;
mod driver;
mod relay_loop;
mod settings;
mod utils;
mod ws;
async fn create_this_controller(
@@ -55,7 +58,7 @@ async fn create_this_relay(
Ok(relay)
}
#[tokio::main]
#[actix::main]
async fn main() -> Result<(), std::io::Error> {
let settings = settings::init()?;
init_logging(&settings.logging.level)?;
@@ -98,6 +101,10 @@ async fn main() -> Result<(), std::io::Error> {
.await
.map_err(EmgauwaError::from)?;
let this = Controller::from_db_model(&mut conn, db_controller).map_err(EmgauwaError::from)?;
let app_state = app_state::AppState::new(pool.clone(), this).start();
let url = format!(
"ws://{}:{}/api/v1/ws/controllers",
settings.core.host, settings.core.port
@@ -106,14 +113,7 @@ async fn main() -> Result<(), std::io::Error> {
tokio::spawn(run_relay_loop(settings));
loop {
let db_controller = db_controller
.reload(&mut conn)
.await
.map_err(EmgauwaError::from)?;
let this =
Controller::from_db_model(&mut conn, db_controller).map_err(EmgauwaError::from)?;
let run_result = run_websocket(pool.clone(), this.clone(), &url).await;
let run_result = run_websocket(pool.clone(), &app_state, &url).await;
if let Err(err) = run_result {
log::error!("Error running websocket: {}", err);
}


@@ -0,0 +1,9 @@
use actix::Addr;
use emgauwa_lib::errors::EmgauwaError;
use emgauwa_lib::models::Controller;
use crate::app_state::{AppState, GetThis};
pub async fn get_this(app_state: &Addr<AppState>) -> Result<Controller, EmgauwaError> {
app_state.send(GetThis {}).await.map_err(EmgauwaError::from)
}


@@ -1,3 +1,4 @@
use actix::Addr;
use emgauwa_lib::db::DbController;
use emgauwa_lib::errors::{DatabaseError, EmgauwaError};
use emgauwa_lib::models::Controller;
@@ -8,9 +9,13 @@ use sqlx::{Pool, Sqlite};
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::{connect_async, tungstenite};
use crate::app_state;
use crate::app_state::AppState;
use crate::utils::get_this;
pub async fn run_websocket(
pool: Pool<Sqlite>,
this: Controller,
app_state: &Addr<AppState>,
url: &str,
) -> Result<(), EmgauwaError> {
match connect_async(url).await {
@@ -19,7 +24,7 @@ pub async fn run_websocket(
let (mut write, read) = ws_stream.split();
let ws_action = ControllerWsAction::Register(this.clone());
let ws_action = ControllerWsAction::Register(get_this(app_state).await?);
let ws_action_json = serde_json::to_string(&ws_action)?;
if let Err(err) = write.send(Message::text(ws_action_json)).await {
@@ -27,7 +32,7 @@ pub async fn run_websocket(
return Ok(());
}
let read_handler = read.for_each(|msg| handle_message(pool.clone(), this.clone(), msg));
let read_handler = read.for_each(|msg| handle_message(pool.clone(), app_state, msg));
read_handler.await;
@@ -42,7 +47,7 @@ pub async fn run_websocket(
async fn handle_message(
pool: Pool<Sqlite>,
this: Controller,
app_state: &Addr<AppState>,
message_result: Result<Message, tungstenite::Error>,
) {
let msg = match message_result {
@@ -52,8 +57,8 @@ async fn handle_message(
return;
}
};
match msg {
Message::Text(text) => match serde_json::from_str(&text) {
if let Message::Text(text) = msg {
match serde_json::from_str(&text) {
Ok(action) => {
log::debug!("Received action: {:?}", action);
let mut pool_conn = match pool.acquire().await {
@@ -63,7 +68,7 @@ async fn handle_message(
return;
}
};
let action_res = handle_action(&mut pool_conn, this, action).await;
let action_res = handle_action(&mut pool_conn, app_state, action).await;
if let Err(e) = action_res {
log::error!("Error handling action: {:?}", e);
}
@@ -71,19 +76,18 @@ async fn handle_message(
Err(e) => {
log::error!("Error deserializing action: {:?}", e);
}
},
_ => (),
}
}
}
pub async fn handle_action(
conn: &mut PoolConnection<Sqlite>,
this: Controller,
app_state: &Addr<AppState>,
action: ControllerWsAction,
) -> Result<(), EmgauwaError> {
match action {
ControllerWsAction::Controller(controller) => {
handle_controller(conn, this, controller).await
handle_controller(conn, app_state, controller).await
}
_ => Ok(()),
}
@@ -91,9 +95,10 @@ pub async fn handle_action(
pub async fn handle_controller(
conn: &mut PoolConnection<Sqlite>,
this: Controller,
app_state: &Addr<AppState>,
controller: Controller,
) -> Result<(), EmgauwaError> {
let this = get_this(app_state).await?;
if controller.c.uid != this.c.uid {
return Err(EmgauwaError::Other(String::from(
"Controller UID mismatch during update",
@@ -105,5 +110,7 @@ pub async fn handle_controller(
.update(conn, controller.c.name.as_str(), this.c.relay_count)
.await?;
app_state.send(app_state::Reload {}).await??;
Ok(())
}


@@ -0,0 +1,51 @@
use actix::MessageResponse;
use futures::executor::block_on;
use serde_derive::{Deserialize, Serialize};
use sqlx::pool::PoolConnection;
use sqlx::Sqlite;
use crate::db::DbController;
use crate::errors::{DatabaseError, EmgauwaError};
use crate::models::{convert_db_list_cache, FromDbModel, Relay};
#[derive(Serialize, Deserialize, Debug, Clone, MessageResponse)]
pub struct Controller {
#[serde(flatten)]
pub c: DbController,
pub relays: Vec<Relay>,
}
impl FromDbModel for Controller {
type DbModel = DbController;
type DbModelCache = Vec<Relay>;
fn from_db_model(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
) -> Result<Self, DatabaseError> {
let relays_db = block_on(db_model.get_relays(conn))?;
let cache = convert_db_list_cache(conn, relays_db, db_model.clone())?;
Self::from_db_model_cache(conn, db_model, cache)
}
fn from_db_model_cache(
_conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
cache: Self::DbModelCache,
) -> Result<Self, DatabaseError> {
Ok(Controller {
c: db_model,
relays: cache,
})
}
}
impl Controller {
pub fn reload(&mut self, conn: &mut PoolConnection<Sqlite>) -> Result<(), EmgauwaError> {
self.c = block_on(self.c.reload(conn))?;
for relay in &mut self.relays {
relay.reload(conn)?;
}
Ok(())
}
}
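Controller::reload (like Relay::reload further down) is deliberately synchronous and uses block_on internally, which is what lets the actor's non-async Handler<Reload> implementation call it directly. A small sketch of calling it outside the actor, assuming a pool is at hand; the refresh function is hypothetical:

use emgauwa_lib::errors::EmgauwaError;
use emgauwa_lib::models::Controller;
use futures::executor::block_on;
use sqlx::{Pool, Sqlite};

// Hypothetical, for illustration: refresh a cached Controller from blocking code.
fn refresh(pool: &Pool<Sqlite>, controller: &mut Controller) -> Result<(), EmgauwaError> {
    let mut conn = block_on(pool.acquire())?;
    controller.reload(&mut conn)?;
    Ok(())
}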


@@ -1,12 +1,14 @@
use futures::executor;
use serde_derive::{Deserialize, Serialize};
mod controller;
mod relay;
mod schedule;
pub use controller::Controller;
pub use relay::Relay;
pub use schedule::Schedule;
use sqlx::pool::PoolConnection;
use sqlx::Sqlite;
use crate::db::{DbController, DbJunctionRelaySchedule, DbRelay, DbSchedule};
use crate::errors::DatabaseError;
use crate::types::{ControllerUid, Weekday};
use crate::utils;
pub trait FromDbModel {
type DbModel: Clone;
@@ -28,124 +30,6 @@ pub trait FromDbModel {
Self: Sized;
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Schedule {
#[serde(flatten)]
pub s: DbSchedule,
pub tags: Vec<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Relay {
#[serde(flatten)]
pub r: DbRelay,
pub controller: DbController,
pub controller_id: ControllerUid,
pub schedules: Vec<DbSchedule>,
pub active_schedule: DbSchedule,
pub tags: Vec<String>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Controller {
#[serde(flatten)]
pub c: DbController,
pub relays: Vec<Relay>,
}
impl FromDbModel for Schedule {
type DbModel = DbSchedule;
type DbModelCache = Vec<String>;
fn from_db_model(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
) -> Result<Self, DatabaseError> {
let cache = executor::block_on(db_model.get_tags(conn))?;
Self::from_db_model_cache(conn, db_model, cache)
}
fn from_db_model_cache(
_conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
cache: Self::DbModelCache,
) -> Result<Self, DatabaseError> {
let schedule = db_model.clone();
Ok(Schedule {
s: schedule,
tags: cache,
})
}
}
impl FromDbModel for Relay {
type DbModel = DbRelay;
type DbModelCache = DbController;
fn from_db_model(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
) -> Result<Self, DatabaseError> {
let cache = executor::block_on(db_model.get_controller(conn))?;
Self::from_db_model_cache(conn, db_model, cache)
}
fn from_db_model_cache(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
cache: Self::DbModelCache,
) -> Result<Self, DatabaseError> {
let tags = executor::block_on(db_model.get_tags(conn))?;
let controller_id = cache.uid.clone();
let schedules =
executor::block_on(DbJunctionRelaySchedule::get_schedules(conn, &db_model))?;
let weekday = utils::get_weekday();
let active_schedule = executor::block_on(DbJunctionRelaySchedule::get_schedule(
conn,
&db_model,
weekday as Weekday,
))?
.ok_or(DatabaseError::NotFound)?;
Ok(Relay {
r: db_model,
controller: cache,
controller_id,
schedules,
active_schedule,
tags,
})
}
}
impl FromDbModel for Controller {
type DbModel = DbController;
type DbModelCache = Vec<Relay>;
fn from_db_model(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
) -> Result<Self, DatabaseError> {
let relays_db = executor::block_on(db_model.get_relays(conn))?;
let cache = convert_db_list_cache(conn, relays_db, db_model.clone())?;
Self::from_db_model_cache(conn, db_model, cache)
}
fn from_db_model_cache(
_conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
cache: Self::DbModelCache,
) -> Result<Self, DatabaseError> {
Ok(Controller {
c: db_model,
relays: cache,
})
}
}
fn convert_db_list_generic<T: FromDbModel>(
conn: &mut PoolConnection<Sqlite>,
db_models: Vec<T::DbModel>,


@@ -0,0 +1,70 @@
use futures::executor::block_on;
use serde_derive::{Deserialize, Serialize};
use sqlx::pool::PoolConnection;
use sqlx::Sqlite;
use crate::db::{DbController, DbJunctionRelaySchedule, DbRelay, DbSchedule};
use crate::errors::DatabaseError;
use crate::models::FromDbModel;
use crate::types::{ControllerUid, Weekday};
use crate::utils;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Relay {
#[serde(flatten)]
pub r: DbRelay,
pub controller: DbController,
pub controller_id: ControllerUid,
pub schedules: Vec<DbSchedule>,
pub active_schedule: DbSchedule,
pub tags: Vec<String>,
}
impl FromDbModel for Relay {
type DbModel = DbRelay;
type DbModelCache = DbController;
fn from_db_model(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
) -> Result<Self, DatabaseError> {
let cache = block_on(db_model.get_controller(conn))?;
Self::from_db_model_cache(conn, db_model, cache)
}
fn from_db_model_cache(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
cache: Self::DbModelCache,
) -> Result<Self, DatabaseError> {
let tags = block_on(db_model.get_tags(conn))?;
let controller_id = cache.uid.clone();
let schedules = block_on(DbJunctionRelaySchedule::get_schedules(conn, &db_model))?;
let weekday = utils::get_weekday();
let active_schedule = block_on(DbJunctionRelaySchedule::get_schedule(
conn,
&db_model,
weekday as Weekday,
))?
.ok_or(DatabaseError::NotFound)?;
Ok(Relay {
r: db_model,
controller: cache,
controller_id,
schedules,
active_schedule,
tags,
})
}
}
impl Relay {
pub fn reload(&mut self, conn: &mut PoolConnection<Sqlite>) -> Result<(), DatabaseError> {
self.r = block_on(self.r.reload(conn))?;
Ok(())
}
}


@@ -0,0 +1,41 @@
use futures::executor::block_on;
use serde_derive::{Deserialize, Serialize};
use sqlx::pool::PoolConnection;
use sqlx::Sqlite;
use crate::db::DbSchedule;
use crate::errors::DatabaseError;
use crate::models::FromDbModel;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Schedule {
#[serde(flatten)]
pub s: DbSchedule,
pub tags: Vec<String>,
}
impl FromDbModel for Schedule {
type DbModel = DbSchedule;
type DbModelCache = Vec<String>;
fn from_db_model(
conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
) -> Result<Self, DatabaseError> {
let cache = block_on(db_model.get_tags(conn))?;
Self::from_db_model_cache(conn, db_model, cache)
}
fn from_db_model_cache(
_conn: &mut PoolConnection<Sqlite>,
db_model: Self::DbModel,
cache: Self::DbModelCache,
) -> Result<Self, DatabaseError> {
let schedule = db_model.clone();
Ok(Schedule {
s: schedule,
tags: cache,
})
}
}