chore: cargo fmt

2025-03-13 20:32:28 +01:00
parent 3cdaacabac
commit f340278236
5 changed files with 318 additions and 288 deletions

@@ -1,22 +1,22 @@
//offer ota and config mode
use std::{
str::from_utf8,
sync::{atomic::AtomicBool, Arc},
};
use crate::{
get_version, log::LogMessage, map_range_moisture, plant_hal::PLANT_COUNT, BOARD_ACCESS
get_version, log::LogMessage, map_range_moisture, plant_hal::PLANT_COUNT, BOARD_ACCESS,
};
use anyhow::bail;
use chrono::DateTime;
use esp_idf_sys::{settimeofday, timeval, vTaskDelay};
use esp_ota::OtaUpdate;
use core::result::Result::Ok;
use embedded_svc::http::Method;
use esp_idf_hal::delay::Delay;
use esp_idf_svc::http::server::{Configuration, EspHttpConnection, EspHttpServer, Request};
use esp_idf_sys::{settimeofday, timeval, vTaskDelay};
use esp_ota::OtaUpdate;
use heapless::String;
use serde::{Deserialize, Serialize};
use std::{
str::from_utf8,
sync::{atomic::AtomicBool, Arc},
};
use url::Url;
use crate::config::PlantControllerConfig;
@@ -49,14 +49,14 @@ pub struct TestPump {
}
#[derive(Serialize, Deserialize, PartialEq, Debug)]
pub struct WebBackupHeader{
pub struct WebBackupHeader {
timestamp: std::string::String,
size: usize
size: usize,
}
#[derive(Deserialize)]
pub struct NightLampCommand {
active: bool
pub struct NightLampCommand {
active: bool,
}
fn write_time(
@@ -66,10 +66,10 @@ fn write_time(
let time: SetTime = serde_json::from_slice(&actual_data)?;
let parsed = DateTime::parse_from_rfc3339(time.time).map_err(|err| anyhow::anyhow!(err))?;
let mut board = BOARD_ACCESS.lock().unwrap();
let now = timeval {
tv_sec: parsed.to_utc().timestamp(),
tv_usec: 0
tv_usec: 0,
};
unsafe { settimeofday(&now, core::ptr::null_mut()) };
board.set_rtc_time(&parsed.to_utc())?;
@@ -173,7 +173,6 @@ fn get_backup_config(
anyhow::Ok(Some(json))
}
fn backup_info(
_request: &mut Request<&mut EspHttpConnection>,
) -> Result<Option<std::string::String>, anyhow::Error> {
@@ -182,13 +181,13 @@ fn backup_info(
let json = match header {
Ok(h) => {
let timestamp = DateTime::from_timestamp_millis(h.timestamp).unwrap();
let wbh = WebBackupHeader{
let wbh = WebBackupHeader {
timestamp: timestamp.to_rfc3339(),
size: h.size,
};
serde_json::to_string(&wbh)?
},
Err(_) => "{\"error\":\"Header could not be parsed\"".to_owned()
}
Err(_) => "{\"error\":\"Header could not be parsed\"".to_owned(),
};
anyhow::Ok(Some(json))
}
@@ -203,7 +202,6 @@ fn set_config(
anyhow::Ok(Some("saved".to_owned()))
}
fn get_battery_state(
_request: &mut Request<&mut EspHttpConnection>,
) -> Result<Option<std::string::String>, anyhow::Error> {
@@ -221,13 +219,14 @@ fn get_log(
}
fn get_log_localization_config() -> Result<std::string::String, anyhow::Error> {
anyhow::Ok(serde_json::to_string(&LogMessage::to_log_localisation_config())?)
anyhow::Ok(serde_json::to_string(
&LogMessage::to_log_localisation_config(),
)?)
}
fn get_version_web(
_request: &mut Request<&mut EspHttpConnection>,
) -> Result<Option<std::string::String>, anyhow::Error> {
anyhow::Ok(Some(serde_json::to_string(&get_version())?))
}
@@ -278,7 +277,7 @@ fn ota(
let mut board = BOARD_ACCESS.lock().unwrap();
let mut ota = OtaUpdate::begin()?;
println!("start ota");
//having a larger buffer is not really faster, requires more stack and prevents the progress bar from working ;)
const BUFFER_SIZE: usize = 512;
let mut buffer: [u8; BUFFER_SIZE] = [0; BUFFER_SIZE];
@@ -289,14 +288,13 @@ fn ota(
total_read += read;
let to_write = &buffer[0..read];
let iter = (total_read/1024)%8;
let iter = (total_read / 1024) % 8;
if iter != lastiter {
for i in 0..PLANT_COUNT {
board.fault(i, iter==i);
board.fault(i, iter == i);
}
lastiter = iter;
}
}
ota.write(to_write)?;
if read == 0 {
@@ -380,15 +378,15 @@ pub fn httpd(reboot_now: Arc<AtomicBool>) -> Box<EspHttpServer<'static>> {
})
.unwrap();
server
.fn_handler("/log", Method::Get, |request| {
handle_error_to500(request, get_log)
})
.unwrap();
server.fn_handler("/log_localization", Method::Get, |request| {
cors_response(request, 200, &get_log_localization_config().unwrap())
})
.unwrap();
.fn_handler("/log", Method::Get, |request| {
handle_error_to500(request, get_log)
})
.unwrap();
server
.fn_handler("/log_localization", Method::Get, |request| {
cors_response(request, 200, &get_log_localization_config().unwrap())
})
.unwrap();
server
.fn_handler("/battery", Method::Get, |request| {
handle_error_to500(request, get_battery_state)
@@ -415,10 +413,10 @@ pub fn httpd(reboot_now: Arc<AtomicBool>) -> Box<EspHttpServer<'static>> {
})
.unwrap();
server
.fn_handler("/lamptest", Method::Post, |request| {
handle_error_to500(request, night_lamp_test)
})
.unwrap();
.fn_handler("/lamptest", Method::Post, |request| {
handle_error_to500(request, night_lamp_test)
})
.unwrap();
server
.fn_handler("/boardtest", Method::Post, move |_| {
BOARD_ACCESS.lock().unwrap().test()
@@ -456,15 +454,15 @@ pub fn httpd(reboot_now: Arc<AtomicBool>) -> Box<EspHttpServer<'static>> {
})
.unwrap();
server
.fn_handler("/backup_config", Method::Post, move |request| {
handle_error_to500(request, backup_config)
})
.unwrap();
.fn_handler("/backup_config", Method::Post, move |request| {
handle_error_to500(request, backup_config)
})
.unwrap();
server
.fn_handler("/backup_info", Method::Get, move |request| {
handle_error_to500(request, backup_info)
})
.unwrap();
.fn_handler("/backup_info", Method::Get, move |request| {
handle_error_to500(request, backup_info)
})
.unwrap();
server
.fn_handler("/files", Method::Get, move |request| {
handle_error_to500(request, list_files)
@@ -472,25 +470,22 @@ pub fn httpd(reboot_now: Arc<AtomicBool>) -> Box<EspHttpServer<'static>> {
.unwrap();
let reboot_now_for_reboot = reboot_now.clone();
server
.fn_handler("/reboot", Method::Post, move |_| {
BOARD_ACCESS
.lock()
.unwrap()
.set_restart_to_conf(true);
reboot_now_for_reboot.store(true, std::sync::atomic::Ordering::Relaxed);
anyhow::Ok(())
})
.unwrap();
.fn_handler("/reboot", Method::Post, move |_| {
BOARD_ACCESS.lock().unwrap().set_restart_to_conf(true);
reboot_now_for_reboot.store(true, std::sync::atomic::Ordering::Relaxed);
anyhow::Ok(())
})
.unwrap();
unsafe { vTaskDelay(1) };
let reboot_now_for_exit = reboot_now.clone();
server
.fn_handler("/exit", Method::Post, move |_| {
reboot_now_for_exit.store(true, std::sync::atomic::Ordering::Relaxed);
anyhow::Ok(())
})
.unwrap();
.fn_handler("/exit", Method::Post, move |_| {
reboot_now_for_exit.store(true, std::sync::atomic::Ordering::Relaxed);
anyhow::Ok(())
})
.unwrap();
server
.fn_handler("/file", Method::Get, move |request| {
let filename = query_param(request.uri(), "filename").unwrap();
@@ -532,28 +527,23 @@ pub fn httpd(reboot_now: Arc<AtomicBool>) -> Box<EspHttpServer<'static>> {
server
.fn_handler("/file", Method::Post, move |mut request| {
let filename = query_param(request.uri(), "filename").unwrap();
let lock = BOARD_ACCESS
.lock()
.unwrap();
let file_handle =
lock.get_file_handle(&filename, true);
let lock = BOARD_ACCESS.lock().unwrap();
let file_handle = lock.get_file_handle(&filename, true);
match file_handle {
//TODO get free filesystem size, check against during write if not to large
Ok(mut file_handle) => {
const BUFFER_SIZE: usize = 512;
let mut buffer: [u8; BUFFER_SIZE] = [0; BUFFER_SIZE];
let mut total_read: usize = 0;
let mut lastiter = 0;
loop {
let iter = (total_read/1024)%8;
let iter = (total_read / 1024) % 8;
if iter != lastiter {
for i in 0..PLANT_COUNT {
lock.fault(i, iter==i);
lock.fault(i, iter == i);
}
lastiter = iter;
}
}
let read = request.read(&mut buffer)?;
total_read += read;
@@ -697,11 +687,14 @@ fn handle_error_to500(
return anyhow::Ok(());
}
fn read_up_to_bytes_from_request(request: &mut Request<&mut EspHttpConnection<'_>>, limit: Option<usize>) -> Result<Vec<u8>, anyhow::Error> {
fn read_up_to_bytes_from_request(
request: &mut Request<&mut EspHttpConnection<'_>>,
limit: Option<usize>,
) -> Result<Vec<u8>, anyhow::Error> {
let max_read = limit.unwrap_or(1024);
let mut data_store = Vec::new();
let mut total_read = 0;
loop{
loop {
let mut buf = [0_u8; 64];
let read = request.read(&mut buf)?;
if read == 0 {
@@ -709,7 +702,7 @@ fn read_up_to_bytes_from_request(request: &mut Request<&mut EspHttpConnection<'_
}
let actual_data = &buf[0..read];
total_read += read;
if total_read > max_read{
if total_read > max_read {
bail!("Request too large {total_read} > {max_read}");
}
data_store.push(actual_data.to_owned());