Serialize index into DB
parent 1f3cc1ea26
commit b42c6d39e8
9 changed files with 105 additions and 42 deletions
Cargo.lock (generated, 37 lines changed)
@@ -60,6 +60,12 @@ dependencies = [
  "byteorder",
 ]
 
+[[package]]
+name = "arrayvec"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
+
 [[package]]
 name = "async-trait"
 version = "0.1.81"
@@ -258,6 +264,30 @@ version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b"
 
+[[package]]
+name = "bitcode"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee1bce7608560cd4bf0296a4262d0dbf13e6bcec5ff2105724c8ab88cc7fc784"
+dependencies = [
+ "arrayvec",
+ "bitcode_derive",
+ "bytemuck",
+ "glam",
+ "serde",
+]
+
+[[package]]
+name = "bitcode_derive"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a539389a13af092cd345a2b47ae7dec12deb306d660b2223d25cd3419b253ebe"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.71",
+]
+
 [[package]]
 name = "bitflags"
 version = "1.3.2"
@@ -810,6 +840,12 @@ version = "0.28.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
 
+[[package]]
+name = "glam"
+version = "0.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "779ae4bf7e8421cf91c0b3b64e7e8b40b862fba4d393f59150042de7c4965a94"
+
 [[package]]
 name = "hashbrown"
 version = "0.14.0"
@@ -1556,6 +1592,7 @@ dependencies = [
  "axum-range",
  "axum-test",
  "base64 0.22.1",
+ "bitcode",
  "branca",
  "bytes",
  "crossbeam-channel",
@@ -13,6 +13,7 @@ ape = "0.5"
 axum-extra = { version = "0.9.3", features = ["typed-header"] }
 axum-range = "0.4.0"
 base64 = "0.22.1"
+bitcode = { version = "0.6.3", features = ["serde"] }
 branca = "0.10.1"
 crossbeam-channel = "0.5.13"
 futures-util = { version = "0.3.30" }
@@ -67,7 +67,7 @@ impl App {
         let auth_secret = settings_manager.get_auth_secret().await?;
         let ddns_manager = ddns::Manager::new(db.clone());
         let user_manager = user::Manager::new(db.clone(), auth_secret);
-        let index_manager = collection::IndexManager::new();
+        let index_manager = collection::IndexManager::new(db.clone()).await;
         let browser = collection::Browser::new(db.clone(), vfs_manager.clone());
         let updater = collection::Updater::new(
             db.clone(),
@@ -4,21 +4,29 @@ use std::{
     sync::Arc,
 };
 
+use log::{error, info};
 use rand::{rngs::ThreadRng, seq::IteratorRandom};
+use serde::{Deserialize, Serialize};
 use tokio::sync::RwLock;
 
-use crate::app::collection;
+use crate::{app::collection, db::DB};
 
 #[derive(Clone)]
 pub struct IndexManager {
+    db: DB,
     index: Arc<RwLock<Index>>,
 }
 
 impl IndexManager {
-    pub fn new() -> Self {
-        Self {
+    pub async fn new(db: DB) -> Self {
+        let mut index_manager = Self {
+            db,
             index: Arc::default(),
-        }
+        };
+        if let Err(e) = index_manager.try_restore_index().await {
+            error!("Failed to restore index: {}", e);
+        }
+        index_manager
     }
 
     pub(super) async fn replace_index(&mut self, new_index: Index) {
@@ -26,6 +34,37 @@ impl IndexManager {
         *lock = new_index;
     }
 
+    pub(super) async fn persist_index(&mut self, index: &Index) -> Result<(), collection::Error> {
+        let serialized = match bitcode::serialize(index) {
+            Ok(s) => s,
+            Err(_) => return Err(collection::Error::IndexSerializationError),
+        };
+        sqlx::query!("UPDATE collection_index SET content = $1", serialized)
+            .execute(self.db.connect().await?.as_mut())
+            .await?;
+        Ok(())
+    }
+
+    async fn try_restore_index(&mut self) -> Result<bool, collection::Error> {
+        let serialized = sqlx::query_scalar!("SELECT content FROM collection_index")
+            .fetch_one(self.db.connect().await?.as_mut())
+            .await?;
+
+        let Some(serialized) = serialized else {
+            info!("Database did not contain a collection to restore");
+            return Ok(false);
+        };
+
+        let index = match bitcode::deserialize(&serialized[..]) {
+            Ok(i) => i,
+            Err(_) => return Err(collection::Error::IndexDeserializationError),
+        };
+
+        self.replace_index(index).await;
+
+        Ok(true)
+    }
+
     pub async fn get_random_albums(
         &self,
         count: usize,
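The two new methods above come down to a bitcode round-trip of a serde type plus a single-row read/write in SQLite. Below is a minimal standalone sketch of just the serialization round-trip, assuming bitcode 0.6 with the "serde" feature as added in Cargo.toml; the TinyIndex type is a made-up stand-in for the real Index and is not part of this commit.

// Sketch only: serialize a serde type to bytes with bitcode and read it back,
// mirroring what persist_index / try_restore_index do with the in-memory Index.
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
struct TinyIndex {
    songs: HashMap<u64, String>, // stand-in for the real song/album maps
}

fn main() {
    let mut index = TinyIndex::default();
    index.songs.insert(1, "root/album/track.mp3".to_string());

    // With the "serde" feature enabled, bitcode exposes serialize/deserialize for serde types.
    let blob: Vec<u8> = bitcode::serialize(&index).expect("serialize");
    let restored: TinyIndex = bitcode::deserialize(&blob).expect("deserialize");
    assert_eq!(index, restored);

    // This byte blob is what the commit stores in the collection_index.content column.
    println!("serialized index is {} bytes", blob.len());
}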
@@ -119,7 +158,7 @@ impl IndexBuilder {
     }
 }
 
-#[derive(Default)]
+#[derive(Default, Serialize, Deserialize)]
 pub(super) struct Index {
     songs: HashMap<SongID, collection::Song>,
     albums: HashMap<AlbumID, Album>,
@@ -148,10 +187,10 @@ impl Index {
     }
 }
 
-#[derive(Clone, Copy, Eq, Hash, PartialEq)]
+#[derive(Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct SongID(u64);
 
-#[derive(Clone, Eq, Hash, PartialEq)]
+#[derive(Clone, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct SongKey {
     pub virtual_path: String,
 }
@@ -177,7 +216,7 @@ impl collection::Song {
     }
 }
 
-#[derive(Default)]
+#[derive(Default, Serialize, Deserialize)]
 struct Album {
     pub name: Option<String>,
     pub artwork: Option<String>,
@@ -187,7 +226,7 @@ struct Album {
     pub songs: HashSet<SongID>,
 }
 
-#[derive(Clone, Copy, Eq, Hash, PartialEq)]
+#[derive(Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)]
 pub struct AlbumID(u64);
 
 #[derive(Clone, Eq, Hash, PartialEq)]
@@ -1,5 +1,6 @@
 use std::path::PathBuf;
 
+use serde::{Deserialize, Serialize};
 use sqlx::prelude::FromRow;
 
 use crate::{
@@ -7,7 +8,7 @@ use crate::{
     db,
 };
 
-#[derive(Clone, Debug, FromRow, PartialEq, Eq)]
+#[derive(Clone, Debug, FromRow, PartialEq, Eq, Serialize, Deserialize)]
 pub struct MultiString(pub Vec<String>);
 
 impl MultiString {
@@ -33,6 +34,10 @@ pub enum Error {
     DatabaseConnection(#[from] db::Error),
     #[error(transparent)]
     Vfs(#[from] vfs::Error),
+    #[error("Could not deserialize collection")]
+    IndexDeserializationError,
+    #[error("Could not serialize collection")]
+    IndexSerializationError,
     #[error(transparent)]
     ThreadPoolBuilder(#[from] rayon::ThreadPoolBuildError),
     #[error(transparent)]
@@ -45,7 +50,7 @@ pub enum File {
     Song(Song),
 }
 
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub struct Song {
     pub id: i64,
     pub path: String,
@@ -27,7 +27,7 @@ impl Updater {
         settings_manager: settings::Manager,
         vfs_manager: vfs::Manager,
     ) -> Result<Self, Error> {
-        let mut updater = Self {
+        let updater = Self {
             db,
             index_manager,
             vfs_manager,
@@ -47,8 +47,6 @@ impl Updater {
             }
         });
 
-        updater.rebuild_index().await?;
-
         Ok(updater)
     }
 
@@ -76,33 +74,6 @@ impl Updater {
         });
     }
 
-    async fn rebuild_index(&mut self) -> Result<(), Error> {
-        let start = Instant::now();
-        info!("Rebuilding index from disk database");
-
-        let mut index_builder = IndexBuilder::default();
-
-        let mut connection = self.db.connect().await?;
-        let songs = sqlx::query_as!(Song, "SELECT * FROM songs")
-            .fetch_all(connection.as_mut())
-            .await?;
-
-        for song in songs {
-            index_builder.add_song(song);
-        }
-
-        self.index_manager
-            .replace_index(index_builder.build())
-            .await;
-
-        info!(
-            "Index rebuild took {} seconds",
-            start.elapsed().as_millis() as f32 / 1000.0
-        );
-
-        Ok(())
-    }
-
     pub async fn update(&mut self) -> Result<(), Error> {
         let start = Instant::now();
         info!("Beginning collection scan");
@@ -172,6 +143,7 @@ impl Updater {
         });
 
         let index = tokio::join!(scanner.scan(), directory_task, song_task).2?;
+        self.index_manager.persist_index(&index).await?;
         self.index_manager.replace_index(index).await;
 
         info!(
@@ -68,7 +68,7 @@ impl ContextBuilder {
             ddns_manager.clone(),
         );
         let browser = collection::Browser::new(db.clone(), vfs_manager.clone());
-        let index_manager = collection::IndexManager::new();
+        let index_manager = collection::IndexManager::new(db.clone()).await;
         let updater = collection::Updater::new(
             db.clone(),
             index_manager.clone(),
@@ -75,6 +75,13 @@ CREATE TABLE songs (
     UNIQUE(path) ON CONFLICT REPLACE
 );
 
+CREATE TABLE collection_index (
+    id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
+    content BLOB
+);
+
+INSERT INTO collection_index (id, content) VALUES (0, NULL);
+
 CREATE TABLE playlists (
     id INTEGER PRIMARY KEY NOT NULL,
     owner INTEGER NOT NULL,
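The migration pins collection_index to a single row: CHECK(id = 0) rejects any other id, and the seeded NULL row lets persist_index use a plain UPDATE with no WHERE clause or upsert. The following rough sketch exercises that access pattern on its own, assuming sqlx with its sqlite and tokio runtime features; it uses the non-macro query API and an in-memory database, so it illustrates the idea rather than copying the project's code.

// Sketch only: a single-row blob table shaped like collection_index.
use sqlx::sqlite::SqlitePoolOptions;

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    let pool = SqlitePoolOptions::new().connect("sqlite::memory:").await?;

    // Same shape as the migration: at most one row, enforced by CHECK(id = 0).
    sqlx::query(
        "CREATE TABLE collection_index (
            id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
            content BLOB
        )",
    )
    .execute(&pool)
    .await?;
    sqlx::query("INSERT INTO collection_index (id, content) VALUES (0, NULL)")
        .execute(&pool)
        .await?;

    // Because exactly one row can exist, an unqualified UPDATE is enough.
    let blob: Vec<u8> = vec![1, 2, 3];
    sqlx::query("UPDATE collection_index SET content = ?")
        .bind(&blob[..])
        .execute(&pool)
        .await?;

    // Reading it back yields NULL until something has been persisted.
    let stored: Option<Vec<u8>> = sqlx::query_scalar("SELECT content FROM collection_index")
        .fetch_one(&pool)
        .await?;
    assert_eq!(stored.as_deref(), Some(&blob[..]));
    Ok(())
}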
@@ -89,6 +89,8 @@ impl From<collection::Error> for APIError {
             collection::Error::Database(e) => APIError::Database(e),
             collection::Error::DatabaseConnection(e) => e.into(),
             collection::Error::Vfs(e) => e.into(),
+            collection::Error::IndexDeserializationError => APIError::Internal,
+            collection::Error::IndexSerializationError => APIError::Internal,
             collection::Error::ThreadPoolBuilder(_) => APIError::Internal,
             collection::Error::ThreadJoining(_) => APIError::Internal,
         }