Merge branch 'next'

This commit is contained in:
Antoine Gersant 2025-02-04 23:28:09 -08:00
commit 41c4088477
170 changed files with 13270 additions and 9670 deletions
.envrc
.github/workflows
.gitignore
.vscode
CHANGELOG.mdCargo.lockCargo.tomlREADME.mdbuild.rsdiesel.toml
docs
flake.lockflake.nix
migrations
201706250006_init
201706250228_directories_date_added
201706272129_users_table
201706272304_misc_settings_table
201706272313_ddns_config_table
201706272327_mount_points_table
201707091522_playlists_tables
20170929203228_add_prefix_url
20171015224223_add_song_duration
20180303211100_add_last_fm_credentials
2019-08-08-042731_blob_auth_secret
2019-09-28-231910_pbkdf2_simple
2020-01-08-231420_add_theme
2020-11-25-174000_remove_prefix_url
2021-05-01-011426_add_lyricist
res
rust-toolchainrust-toolchain.toml
src

1
.envrc Normal file
View file

@ -0,0 +1 @@
use flake

View file

@ -11,19 +11,14 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
features: [--no-default-features, --features bundle-sqlite, --features ui]
exclude:
- os: windows-latest
features: --no-default-features
features: ["", --features ui]
steps:
- name: Install libsqlite3-dev
if: contains(matrix.os, 'ubuntu') && !contains(matrix.features, 'bundle-sqlite')
run: sudo apt-get update && sudo apt-get install libsqlite3-dev
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
profile: minimal
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: actions-rs/cargo@v1
with:
command: test

View file

@ -17,8 +17,7 @@ jobs:
- name: Checkout Polaris
uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: llvm-tools-preview
components: llvm-tools-preview
- name: Install grcov
run: cargo install grcov
- name: Run tests
@ -34,11 +33,11 @@ jobs:
--llvm
--branch
--ignore-not-existing
--binary-path ./target/debug
--excl-line "#\[derive\("
--excl-br-line "#\[derive\("
--excl-start "mod tests \{"
--excl-br-start "mod tests \{"
working-directory: lib
- name: Upload Results
uses: codecov/codecov-action@v2
with:

View file

@ -49,9 +49,7 @@ jobs:
with:
ref: release
- name: Install Rust Toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Make release
uses: ./.github/actions/make-windows-release
with:

View file

@ -1,10 +1,6 @@
on:
pull_request:
branches:
- master
push:
branches:
- master
name: Validate Install
@ -22,8 +18,9 @@ jobs:
version-number: "0.0.0"
output-file: polaris.tar.gz
- name: Upload packaged release
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
if-no-files-found: error
name: linux-release
path: polaris.tar.gz
@ -34,7 +31,7 @@ jobs:
steps:
- name: Download release
uses: actions/download-artifact@v2
uses: actions/download-artifact@v4
with:
name: linux-release
path: .
@ -44,9 +41,7 @@ jobs:
run: make preview
- name: Preview Install w/ Custom Prefix
run: make preview PREFIX=/some/random/prefix
- uses: actions-rs/toolchain@v1
with:
profile: minimal
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install
run: sudo --preserve-env=PATH make install
- name: Run Polaris
@ -65,7 +60,7 @@ jobs:
steps:
- name: Download release
uses: actions/download-artifact@v2
uses: actions/download-artifact@v4
with:
name: linux-release
path: .
@ -75,9 +70,7 @@ jobs:
run: make preview-xdg
- name: Preview Install w/ Custom XDG_DATA_HOME
run: make preview-xdg XDG_DATA_HOME=/my/own/xdg/home
- uses: actions-rs/toolchain@v1
with:
profile: minimal
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install
run: make install-xdg
- name: Run Polaris
@ -97,17 +90,16 @@ jobs:
- name: Checkout Polaris
uses: actions/checkout@v1
- name: Install Rust Toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Make release
uses: ./.github/actions/make-windows-release
with:
version-number: "0.0.0"
output-file: polaris.msi
- name: Upload packaged release
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
if-no-files-found: error
name: windows-release
path: polaris.msi
@ -118,7 +110,7 @@ jobs:
steps:
- name: Download release
uses: actions/download-artifact@v2
uses: actions/download-artifact@v4
with:
name: windows-release
path: .

9
.gitignore vendored
View file

@ -1,3 +1,6 @@
# Dev environment
.direnv
# Build output
target
@ -8,9 +11,13 @@ test-output
TestConfig.toml
# Runtime artifacts
*.sqlite
auth.secret
collection.index
polaris.log
polaris.ndb
polaris.pid
profile.json
/peaks
/thumbnails
# Release process artifacts (usually runs on CI)

47
.vscode/tasks.json vendored
View file

@ -1,47 +0,0 @@
{
"version": "2.0.0",
"presentation": {
"reveal": "always"
},
"tasks": [
{
"label": "Run",
"options": {
"cwd": "${workspaceRoot}"
},
"command": "cargo",
"args": [
"run",
"--",
"-c",
"./TestConfigWindows.toml",
"-d",
"test/db.sqlite",
"-w",
"../polaris-web"
],
"problemMatcher": []
},
{
"group": "test",
"label": "Test",
"options": {
"cwd": "${workspaceRoot}"
},
"command": "cargo",
"args": [
"test"
]
},
{
"label": "Compile",
"options": {
"cwd": "${workspaceRoot}"
},
"command": "cargo",
"args": [
"check"
]
}
]
}

View file

@ -1,5 +1,63 @@
# Changelog
## Polaris 0.15.0
### Server
- Added support for browsing the music collection by metadata (by artist, by genre, etc.).
- Added support for multi-value metadata for the following song fields: `artist`, `album artist`, `composer`, `genre`, `label` and `lyricist`.
- Added support for structured search query syntax.
- Added capability to extract audio waveform data.
- Configuration data (user credentials, music directories, etc.) is now stored in a plain-text file which Polaris can read and write to.
- ⚠️ The configuration format is now ([documented](docs/CONFIGURATION.md)) and slightly simpler than in previous versions.
- Persistent data, such as playlists, is now saved in a directory that may be configured with the `--data` CLI option or the `POLARIS_DATA_DIR` environment variable.
- ⚠️ Upon first launch, configuration data and playlists will be migrated from the Polaris 0.14.0 database into their new homes. After successful migration, the old database file will be deleted and the server will finally start. This migration functionality will be removed in future Polaris versions.
- Collection scans are now automatically triggered when configuration changes or files are added/removed.
- ⚠️ Dynamic DNS now works with any provider that supports updates over HTTP without header-based auth. This means YDNS is no longer an option, and you need to input a new URL for DDNS updates.
- ⚠️ Removed last.fm integration due to maintenance concerns (abandoned libraries, broken account linking) and mismatch with project goals.
- Removed periodic collection scans.
### Web client
- Every page has been updated to a new visual style.
- The file browser is now displayed as an interactive tree on a single page.
- The file browser now supports common navigation keyboard shortcuts.
- The file browser now supports jumping to a visible file or folder by typing the start of its name.
- The file browser now omits the top-level directory when only one music folder has been configured.
- The current playlist now has two display modes: compact or with album art.
- Songs in the current playlist can now be selected and re-ordered with the mouse.
- Added a button to display statistics about the current playlist.
- Added new pages to browse the music collection by genre.
- Added new pages to browse the music collection by artist.
- Added a new page to browse the music collection by album.
- The Recently Added Albums and Random Albums pages now distinguish albums by file metadata instead of file path.
- When navigating back to the Random Albums page, the shuffle ordering is now preserved.
- The current playlist now supports common navigation keyboard shortcuts.
- The seekbar for the current song being played has been replaced with a waveform visualization.
- The title of the current song in the player can be clicked to display its metadata
- Improved responsiveness when queuing large amounts of songs at once.
- The `Settings > Collection` page now shows the current status of collection scanning.
- Theme preferences have been reset and are now stored client-side.
- Accent color is now configured as a saturation multiplier and base hue, which are used to generate a full color ramp.
### API
- API version is now 8.0.
- Documentation is now served under `/api-docs` instead of `/swagger` (eg. `http://localhost:5050/api-docs`)
- Clients are now expected to send their preferred API major version in a `Accept-Version` header. Omitting this currently defaults to `7`, but will become an error in future Polaris releases. Support for API version 7 will be removed entirely in a future release.
- Most API responses now support gzip compression.
The response format of the `/browse`, `/flatten`, `/get_playlist`, `/search/<query>` endpoints has been modified to accommodate large lists.
- Added new endpoints to query albums and artists.
- The `/random` and `/recent` albums are deprecated in favor of `/albums/random` and `/albums/recent`. These endpoints now have optional parameters for RNG seeding and pagination.
- The `/search/<query>` endpoint now requires a non-empty query (`/search/` now returns HTTP status code 404, regardless of API version).
- The `/search/<query>` endpoint now supports per-field queries and boolean combinators.
- The `/thumbnail` endpoint supports a new size labeled `tiny`, which returns 40x40px images.
- Added a new `/get_songs` endpoint which returns song metadata in bulk.
- Added a new `/peaks` endpoint which returns audio signal peaks that can be used to draw waveform visualizations.
- Added a new `/index_status` endpoint which returns the status of music collection scans.
- Removed the `/config` and `/preferences` API endpoints.
- Removed the `/ddns` API endpoints, merged into the existing `/settings` endpoints.
## Polaris 0.14.3
### Server

3218
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -6,57 +6,78 @@ edition = "2021"
build = "build.rs"
[features]
default = ["bundle-sqlite"]
bundle-sqlite = ["libsqlite3-sys"]
ui = ["native-windows-gui", "native-windows-derive"]
[profile.release]
lto = "thin"
[dependencies]
actix-files = { version = "0.6" }
actix-web = { version = "4" }
actix-web-httpauth = { version = "0.8" }
ape = "0.5"
base64 = "0.21"
ape = "0.6"
axum-extra = { version = "0.10.0", features = ["typed-header"] }
axum-range = { version = "0.5.0" }
bitcode = { version = "0.6.3", features = ["serde"] }
branca = "0.10.1"
crossbeam-channel = "0.5"
diesel_migrations = { version = "2.0", features = ["sqlite"] }
futures-util = { version = "0.3" }
chumsky = "0.9.3"
enum-map = { version = "2.7.3", features = ["serde"] }
getopts = "0.2.21"
http = "0.2.8"
id3 = "1.7.0"
headers = "0.4"
http = "1.1.0"
icu_collator = "1.5.0"
id3 = "1.14.0"
lasso2 = { version = "0.8.2", features = ["serialize"] }
lewton = "0.10.2"
libsqlite3-sys = { version = "0.26", features = [
"bundled",
"bundled-windows",
], optional = true }
log = "0.4.17"
metaflac = "0.2.5"
log = "0.4.22"
metaflac = "0.2.7"
mp3-duration = "0.1.10"
mp4ameta = "0.11.0"
native_db = "0.8.1"
native_model = "0.4.20"
nohash-hasher = "0.2.0"
notify = { version = "6.1.1", default-features = false }
notify-debouncer-full = { version = "0.3.1", default-features = false }
num_cpus = "1.14.0"
opus_headers = "0.1.2"
# TODO upstream PR: https://github.com/yboettcher/opus_headers/pull/7
opus_headers = { git = "https://github.com/agersant/opus_headers", branch = "multivalue" }
pbkdf2 = "0.11"
percent-encoding = "2.2"
rand = "0.8"
rayon = "1.5"
regex = "1.7.0"
rustfm-scrobble = "1.1.1"
rayon = "1.10.0"
regex = "1.10.5"
rusqlite = { version = "0.32.0", features = ["bundled"] }
serde = { version = "1.0.147", features = ["derive"] }
serde_derive = "1.0.147"
serde_json = "1.0.87"
simplelog = "0.12.0"
thiserror = "1.0.37"
toml = "0.7"
ureq = "2.7"
url = "2.3"
serde_json = "1.0.122"
simplelog = "0.12.2"
symphonia = { version = "0.5.4", features = [
"all-codecs",
"all-formats",
"opt-simd",
] }
tinyvec = { version = "1.8.0", features = ["serde"] }
thiserror = "1.0.62"
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }
tokio-util = { version = "0.7.11", features = ["io"] }
toml = "0.8.19"
tower = { version = "0.5.2" }
tower-http = { version = "0.6.2", features = [
"compression-gzip",
"fs",
"normalize-path",
] }
trie-rs = { version = "0.4.2", features = ["serde"] }
unicase = "2.7.0"
ureq = { version = "2.10.0", default-features = false, features = ["tls"] }
utoipa = { version = "5.3", features = ["axum_extras"] }
utoipa-axum = { version = "0.1" }
utoipa-scalar = { version = "0.2", features = ["axum"] }
[dependencies.diesel]
version = "2.0.2"
default_features = false
features = ["libsqlite3-sys", "r2d2", "sqlite"]
[dependencies.axum]
version = "0.8.1"
default-features = false
features = ["http1", "json", "tokio", "tower-log", "query"]
[dependencies.image]
version = "0.24.4"
default_features = false
version = "0.25.2"
default-features = false
features = ["bmp", "gif", "jpeg", "png"]
[target.'cfg(windows)'.dependencies]
@ -71,13 +92,13 @@ native-windows-derive = { version = "1.0.5", optional = true }
[target.'cfg(unix)'.dependencies]
daemonize = "0.5"
sd-notify = "0.4.1"
sd-notify = "0.4.2"
[target.'cfg(windows)'.build-dependencies]
embed-resource = "1.8"
embed-resource = "2.4.2"
winres = "0.1"
[dev-dependencies]
actix-test = "0.1.0"
headers = "0.3"
fs_extra = "1.2.0"
axum-test = "17.0"
bytes = "1.7.1"
percent-encoding = "2.2"

View file

@ -1,46 +1,64 @@
[![Actions Status](https://github.com/agersant/polaris/workflows/Build/badge.svg)](https://github.com/agersant/polaris/actions)
[![codecov.io](http://codecov.io/github/agersant/polaris/branch/master/graphs/badge.svg)](http://codecov.io/github/agersant/polaris)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE-MIT)
<div align="center">
<h1><img src="res/readme/logo.png?raw=true"/></h1>
<img src="res/readme/logo.png?raw=true"/>
Polaris is a music streaming application, designed to let you enjoy your music collection from any computer or mobile device. Polaris works by streaming music directly from your computer (or cloud server), without uploading it to a third-party. It is free and open-source software, without any kind of premium version.
[![Actions Status](https://github.com/agersant/polaris/workflows/Build/badge.svg)](https://github.com/agersant/polaris/actions)
[![codecov](https://codecov.io/github/agersant/polaris/graph/badge.svg?token=EQqCmBEf2T)](https://codecov.io/github/agersant/polaris)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE-MIT)
## Try It Out!
![Polaris Web UI](res/readme/web_ui.png?raw=true "Polaris Web UI")
</div>
# About
Polaris is a self-hosted music streaming server, to enjoy your music collection from any computer or mobile device. It is free and open-source software, without any kind of premium version.
The goals of this project are:
- 🔥 Exceptional performance and responsiveness
- 📚️ First-class support for large music collections (100,000+ songs)
- 📦️ Ease of installation, deployment and maintenance
- ✨ Beautiful user interface
# Try It Out!
Check out the demo over at https://demo.polaris.stream, featuring a selection of Creative Commons Music. The credentials to access this server are:
Username: `demo_user`
Password: `demo_password`
## Features
# Features
![Polaris Web UI](res/readme/web_ui.png?raw=true "Polaris Web UI")
- Optimized for large music collections
- Can run on Windows, Linux, BSD, or through Docker
- Support for `flac`, `mp3`, `mp4`, `mpc`, `ogg`, `opus`, `ape`, `wav` and `aiff` files
- Easy to setup and administer, no configuration files needed
- Dark mode and customizable color themes
- Listen to your music on the go:
- 🖥️ Runs on Windows, Linux, BSD, or through Docker
- 🔊 Support for `flac`, `mp3`, `mp4`, `mpc`, `ogg`, `opus`, `ape`, `wav` and `aiff` files
- 🌈 Dark mode variants and customizable color palette
- 💿️ Browse your music by album, artist or genre
- 📂 Browse your music as a file tree
- 🌊 Song audio-waveform visualization
- 🏷️ Support for multi-value fields in song metadata (eg. multiple artists per song)
- 🔍️ Powerful search functionality with per-field queries
- ⚙️ Plain-text configuration also editable with built-in UI
- 👥 Setup multiple users, each with their own playlists
- 📱 Listen to your music on the go:
- Polaris Android ([Google Play Store](https://play.google.com/store/apps/details?id=agersant.polaris) · [F-Droid](https://f-droid.org/packages/agersant.polaris/) · [Repository](https://github.com/agersant/polaris-android))
- Polarios ([App Store](https://apps.apple.com/app/polarios/id1662366309) · [Repository](https://gitlab.com/elise/Polarios))
- [Last.fm](https://www.last.fm) scrobbling
- Polarios ([App Store](https://apps.apple.com/app/polarios/id1662366309) · [Repository](https://gitlab.com/elise/Polarios)) [third-party]
## Tutorials
# Installation
- [Getting Started](docs/SETUP.md)
- [Streaming From Remote Devices](docs/DDNS.md)
[Installation documentation](docs/SETUP.md)
## Documentation
[Streaming from remote devices](docs/DDNS.md)
- [Contribute to Polaris](docs/CONTRIBUTING.md)
- [Maintenance Runbooks](docs/MAINTENANCE.md)
[![Packaging status](https://repology.org/badge/vertical-allrepos/polaris-streaming.svg)](https://repology.org/project/polaris-streaming/versions)
### API Documentation
# Documentation
The Polaris server API is documented via [Swagger](https://demo.polaris.stream/swagger/). Every installation of Polaris distributes this documentation, with the ability to use the `Try it out` buttons. To access it, simply open http://localhost:5050/swagger/ in your browser on the machine running Polaris.
- 📒 [Changelog](CHANGELOG.md)
- 🔧 [Configuration](docs/CONFIGURATION.md)
- 👷 [Contribute to Polaris](docs/CONTRIBUTING.md)
- 🛟 [Maintenance Runbooks](docs/MAINTENANCE.md)
## Credits & License Information
The Polaris server API is documented via [OpenAPI](https://demo.polaris.stream/api-docs/). Every installation of Polaris distributes this interactive documentation. To access it, open http://localhost:5050/api-docs/ in your browser on the machine running Polaris.
# Credits & License Information
Music featured in the demo installation:

View file

@ -3,7 +3,10 @@ fn main() {
let mut res = winres::WindowsResource::new();
res.set_icon("./res/windows/application/icon_polaris_512.ico");
res.compile().unwrap();
embed_resource::compile("res/windows/application/polaris-manifest.rc");
embed_resource::compile(
"res/windows/application/polaris-manifest.rc",
embed_resource::NONE,
);
}
#[cfg(unix)]

View file

@ -1,2 +0,0 @@
[print_schema]
file = "src/db/schema.rs"

50
docs/CONFIGURATION.md Normal file
View file

@ -0,0 +1,50 @@
# Configuration
Polaris configuration resides in a single text file whose format is documented below. You can use the Polaris web UI to modify the configuration, or write to it in any text editor. You may edit the configuration file while Polaris is running.
## Location
The location of the configuration file is always logged during Polaris startup. It is determined as follows:
- From the `--config` (or `-c`) CLI option if present. This option must point to the `.toml` file.
- If the CLI option is not specified, Polaris will look for a `polaris.toml` file, inside the directory specified by the `POLARIS_CONFIG_DIR` environment variable _at compilation time_. When using the Windows installer, this will be `%LOCALAPPDATA%/Permafrost/Polaris/polaris.toml`. When using the supplied Makefile, the default is either `/usr/local/etc/polaris` (for a system-wide installations), or `~/.config/polaris` (for a XDG installation).
- If `POLARIS_CONFIG_DIR` was not set when Polaris was compiled, it will default to `.` on Linux, and the `LOCALAPPDATA` location mentioned above on Windows. This behavior on Windows may change in future releases.
## Format
The configuration file uses the [TOML](https://toml.io/) format. Everything in the configuration file is optional and may be omitted (unless mentioned otherwise).
```toml
# Regular expression used to identify album art in files adjacent to an audio file
album_art_pattern = "Folder.(jpeg|jpg|png)"
# A URL Polaris will regularly make requests to in order to update Dynamic DNS
ddns_url = "https://example.com?token=foobar"
# Array of locations Polaris should scan to find music files
[[mount_dirs]]
# Directory to scan
source = "/home/example/music"
# User-facing name for this directory (must be unique)
name = "My Music 🎧️"
[[mount_dirs]]
source = "/mnt/example/more_music"
name = "Extra Music 🎵"
# Array of user accounts who can connect to the Polaris server
[[users]]
# Username for login
name = "example-user"
# If true, user will have access to all settings in the web UI
admin = true
# Plain text password for this user. Will be ignored if hashed_password is set. Polaris will never write to this field. For each user, at least one of initial_password and hashed_password must be set.
initial_password = "top-secret-password"
# Hashed and salted password for the user. Polaris will create this field if unset.
hashed_password = "$pbkdf2-sha256$i=10000,l=32$SI8LjK1KtvcawhgmWGJgRA$t9btMwhUTQ8r3vqI1xhArn19J7Jezyoi461fFjhZXGU"
[[users]]
name = "other-user"
admin = true
initial_password = "amospheric-strawberry64"
```

View file

@ -1,25 +1,37 @@
# Contributing
## Compiling and Running Polaris
## Guidelines
Compiling and running Polaris is very easy as it only depends on the Rust toolchain.
While Polaris is free and open-source software, it is not very open to code contributions. The reasons behind this are:
- Polaris is a hobby project. I don't want it to feel like my day job, where I do a lot of code reviews, mentoring and tech leadership.
- I am committed to maintaining this software for a very long time. I would rather maintain code that I mostly wrote myself.
1. [Install Rust](https://www.rust-lang.org/en-US/install.html)
2. Clone the polaris depot with this command: `git clone --recursive https://github.com/agersant/polaris.git`
This still leaves room for a few avenues to contribute:
- Help answering questions in the issue tracker.
- Package Polaris for a Linux distribution
- Documentation improvements or writing user guides.
- Satellite projects (eg. [docker-polaris](https://github.com/ogarcia/docker-polaris), [polarios](https://gitlab.com/elise/Polarios))
- Bug fixes.
For non-trivial new features, you are welcome to maintain a fork. If you need help finding your way around the code, feel free to open a [discussion thread](https://github.com/agersant/polaris/discussions).
## Compiling and running Polaris
1. [Install Rust](https://www.rust-lang.org/en-US/install.html) (stable toolchain)
2. Clone the polaris depot with this command: `git clone https://github.com/agersant/polaris.git`
3. You can now compile and run polaris from the newly created directory with the command: `cargo run`
Polaris supports a few command line arguments which are useful during development:
- `-c some/config.toml` sets the location of the [configuration](/docs/CONFIGURATION.md) file.
- `--data some/path` sets the folder Polaris will use to store runtime data such as playlists, collection index and auth secrets.
- `-w some/path/to/web/dir` lets you point to the directory to be served as the web interface. You can find a suitable directory in your Polaris install (under `/web`), or from the [latest polaris-web release](https://github.com/agersant/polaris-web/releases/latest/download/web.zip).
- `-s some/path/to/swagger/dir` lets you point to the directory to be served as the swagger API documentation. You'll probably want to point this to the `/docs/swagger` directory of the polaris repository.
- `-d some/path/to/a/file.db` lets you manually choose where Polaris stores its configuration and music index (you can reuse the same database accross multiple runs).
- `-c some/config.toml` lets you use a configuration file to add content to the database. This can be useful if you frequently delete the database and would like to automate the first time flow. The configuration format is not documented but can be inferred by looking at the `Config` struct in `config.rs`.
- `-f` (on Linux) makes Polaris not fork into a separate process.
Putting it all together, a typical command to compile and run the program would be: `cargo run -- -w web -s docs/swagger -d test-output/my.db`
Putting it all together, a typical command to compile and run the program would be: `cargo run -- -w web -c test-config.toml`
While Polaris is running, access the web UI at [http://localhost:5050](http://localhost:5050).
## Running Unit Tests
## Running unit tests
That's the easy part, simply run `cargo test`!

View file

@ -1,4 +1,10 @@
# Streaming From Other Devices
# Streaming from other devices
These instructions apply to users running Polaris on a home network. When deploying to cloud services or a VPS, configuration requirements will differ.
## Port forwarding
Configure port forwarding on your router to redirect port 80 traffic to port 5050 on the computer running Polaris. The exact way to do this depends on your router manufacturer and model.
## Dynamic DNS
@ -8,34 +14,8 @@ You can access your Polaris installation from anywhere via your computer's publi
A solution to these problems is to set up Dynamic DNS, so that your installation can always be reached at a fixed URL.
The steps below will walk you through setting up YDNS and Polaris to give your installation a fixed URL. If you have another solution in mind, or prefer using another Dynamic DNS service, skip to the next section.
1. Register for a free account on https://ydns.io
2. On the YDNS website, access the "My Hosts" page and press the + sign for "Add Host"
3. Fill the host form as described below:
- Domain: ydns.eu
- Name: This part is up to you, whatever you enter will be in the URL you use to access Polaris
- Content: Leave the default. Take a note whether the value looks like a IPv4 address (format: xxx.xxx.xxx.xxx) or a IPv6 address (format: xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx)
- Type: Dynamic IP
4. If the content field looked like a IPv4 address: skip to step #6
5. If the content field looked like a IPv6 address:
- Click on your host name (eg. yourdomain.ydns.eu)
- You should now see a page which looks like this:
![YDNS Records](res/ydns_records.png?raw=true "YDNS Records")
- Click on the green "+" icon on the right
- Fill out the new form as described:
- Make sure the `Type` field is set to `A`
- Set content to 0.0.0.0
- You should now be back on the "records" page which was pictured above
- Click on the ID number on the left for the row that has its `Type` listed as `AAAA` (#28717 in the picture above).
- Click on the red trash can icon in the corner to delete this record
- Done!
6. In the Polaris web interface, access the `Dynamic DNS` tab of the settings screen:
- Update the hostname field to match what you set in step 5. (eg. http://yourdomain.ydns.eu)
- Update the username field to the email address you use when creating your YDNS account
- Update the password field with your YDNS API password. You can find this password on https://ydns.io: click on the "User" icon in the top right and then `Preferences > API`.
## Port Forwarding
Configure port forwarding on your router to redirect port 80 towards port 5050 on the computer where you run Polaris. The exact way to do this depends on your router manufacturer and model.
Don't forget to restart Polaris to apply your configuration changes, and access your music from other computers at http://yourdomain.ydns.eu
1. Reserve a URL with a dynamic DNS provider such as https://www.duckdns.org/ or https://freemyip.com/.
2. The dynamic DNS provider gives you a unique Update URL that can be used to tell them where to send traffic. For example, `freemyip.com` gives you this URL immediately after claiming a subdomain. Other providers may show it in your profile page, etc.
3. Access your Polaris instance (http://localhost:5050 by default).
4. Go to the `Setting page` and into the `Dynamic DNS` section.
5. Set the Update URL to the one you obtained in step 2.

View file

@ -8,8 +8,3 @@
- Input a user-facing version name (eg: **0.13.0**)
- Click the **Run workflow** button
- After CI completes, move the release from Draft to Published
## How to change the database schema
- Add a new folder under `migrations` following the existing pattern
- Run `update_db_schema.bat`

View file

@ -1,42 +1,30 @@
# Getting Started
# Installation
## Requirements
## On Windows
One of the following:
- Windows 7 or newer
- Linux (any reasonably modern distribution should do)
### Windows
1. Download the [latest installer](https://github.com/agersant/polaris/releases/latest) (you want the .msi file)
2. Run the installer
3. That's it, you're done!
3. Launch Polaris from the start menu
4. In your web browser, access http://localhost:5050
You can now start Polaris from the start menu or from your desktop, Polaris will also start automatically next time you restart your computer. You can tell when Polaris is running by its icon in the notification area (near the clock and volume controls).
## In a docker container
### Linux
To run polaris from a Docker container, please follow instructions from the [docker-polaris](https://github.com/ogarcia/docker-polaris) repository.
#### Dependencies
## From source on Linux
1. Install OpenSSL, SQLite and their headers, and some development tools. These are available from your distribution's package manager. For instance on Ubuntu, execute `sudo apt-get install binutils pkg-config libssl-dev`
### Dependencies
1. Install OpenSSL, SQLite and their respective headers (eg. `sudo apt-get install libsqlite3-dev libssl-dev`).
2. Install `binutils` and `pkg-config` (eg. `sudo apt-get install binutils pkg-config`).
2. Install the Rust compiler by executing `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh` or using an [alternative method](https://www.rust-lang.org/en-US/install.html)
#### Polaris installation
### Polaris installation
1. Download the [latest release]((https://github.com/agersant/polaris/releases/latest)) of Polaris (you want the .tar.gz file)
2. Extract the Polaris archive in a directory and open a terminal in that directory
3. To install Polaris within your home directory, execute `make install-xdg`. This installation follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). You can use `make preview-xdg` to see which directories the install process would use.
4. If you prefer a system-wide install, execute `make install` (without the `-xdg` suffix). If you use `sudo` to perform such a system install, you may need the `-E` option so that your sudo user finds the Rust binaries: `sudo -E make install`. This installation follows the [GNU Standard Installation Directories](https://www.gnu.org/prep/standards/html_node/Directory-Variables.html). You can use `make preview` to see which directories the install process would use.
From here, you might want to adjust your system to run Polaris on login using Systemd, Cron or whichever method your distribution endorses.
If you want to uninstall Polaris, execute `make uninstall-xdg` from the extracted archive's directory (or `make uninstall` if you made a system-wide install). This will delete all the files and directories listed above **including your Polaris database**. If you customized the install process by specifying environment variables like `PREFIX`, make sure they are set to the same values when running the uninstall command.
### In a docker container
To run polaris from a Docker container, please follow instructions from the [docker-polaris](https://github.com/ogarcia/docker-polaris) repository.
## Test Run
- Start Polaris using the shortcut on your desktop (Windows) or by running the Polaris executable
- In your Web browser, access http://localhost:5050
- You will see a welcome page that will guide you through the Polaris configuration
If you want to uninstall Polaris, execute `make uninstall-xdg` from the extracted archive's directory (or `make uninstall` if you made a system-wide install). This will delete all the files and directories listed above (including your configuration, playlists, etc.). If you customized the install process by specifying environment variables like `PREFIX`, make sure they are set to the same values when running the uninstall command.

Binary file not shown.

Before

(image error) Size: 665 B

Binary file not shown.

Before

(image error) Size: 628 B

View file

@ -1,60 +0,0 @@
<!-- HTML for static distribution bundle build -->
<!-- Entry point for the bundled Swagger UI used to browse the Polaris HTTP API.
     The spec is loaded from polaris-api.json, served alongside this file. -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Polaris Swagger UI</title>
<link rel="stylesheet" type="text/css" href="swagger-ui.css">
<link rel="icon" type="image/png" href="favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="favicon-16x16.png" sizes="16x16" />
<style>
/* Standard Swagger UI page reset shipped with the distribution bundle. */
html {
	box-sizing: border-box;
	overflow: -moz-scrollbars-vertical;
	overflow-y: scroll;
}
*,
*:before,
*:after {
	box-sizing: inherit;
}
body {
	margin: 0;
	background: #fafafa;
}
</style>
</head>
<body>
<div id="swagger-ui"></div>
<script src="swagger-ui-bundle.js"> </script>
<script src="swagger-ui-standalone-preset.js"> </script>
<script>
// Mount Swagger UI into #swagger-ui once all bundle scripts are loaded.
window.onload = function() {
	// Begin Swagger UI call region
	const ui = SwaggerUIBundle({
		url: "polaris-api.json",
		dom_id: '#swagger-ui',
		deepLinking: true,
		presets: [
			SwaggerUIBundle.presets.apis,
			SwaggerUIStandalonePreset
		],
		plugins: [
			SwaggerUIBundle.plugins.DownloadUrl
		],
		layout: "StandaloneLayout"
	})
	// End Swagger UI call region
	window.ui = ui // exposed globally for debugging from the browser console
}
</script>
</body>
</html>

View file

@ -1,67 +0,0 @@
<!doctype html>
<!-- OAuth2 redirect target for Swagger UI: parses the authorization response
     (code, token, or error) from the URL and relays it to the opener window. -->
<html lang="en-US">
<body onload="run()">
</body>
</html>
<script>
'use strict';
function run () {
	// State stashed by the Swagger UI window that opened this popup.
	var oauth2 = window.opener.swaggerUIRedirectOauth2;
	var sentState = oauth2.state;
	var redirectUrl = oauth2.redirectUrl;
	var isValid, qp, arr;
	// The response may arrive in the fragment (implicit flow) or the query string.
	if (/code|token|error/.test(window.location.hash)) {
		qp = window.location.hash.substring(1);
	} else {
		qp = location.search.substring(1);
	}
	// Convert "a=1&b=2" into a JSON object, URL-decoding each value.
	arr = qp.split("&")
	arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';})
	qp = qp ? JSON.parse('{' + arr.join() + '}',
		function (key, value) {
			return key === "" ? value : decodeURIComponent(value)
		}
	) : {}
	// CSRF protection: the returned state must match what was sent.
	isValid = qp.state === sentState
	if ((
		oauth2.auth.schema.get("flow") === "accessCode"||
		oauth2.auth.schema.get("flow") === "authorizationCode"
	) && !oauth2.auth.code) {
		if (!isValid) {
			oauth2.errCb({
				authId: oauth2.auth.name,
				source: "auth",
				level: "warning",
				message: "Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"
			});
		}
		if (qp.code) {
			// Hand the authorization code back to Swagger UI for the token exchange.
			delete oauth2.state;
			oauth2.auth.code = qp.code;
			oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
		} else {
			let oauthErrorMsg
			if (qp.error) {
				oauthErrorMsg = "["+qp.error+"]: " +
					(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
					(qp.error_uri ? "More info: "+qp.error_uri : "");
			}
			oauth2.errCb({
				authId: oauth2.auth.name,
				source: "auth",
				level: "error",
				message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server"
			});
		}
	} else {
		// Implicit flow: pass the whole parsed response (token etc.) back.
		oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
	}
	window.close();
}
</script>

File diff suppressed because it is too large Load diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
{"version":3,"sources":[],"names":[],"mappings":"","file":"swagger-ui.css","sourceRoot":""}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

46
flake.lock generated Normal file
View file

@ -0,0 +1,46 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1736701207,
"narHash": "sha256-jG/+MvjVY7SlTakzZ2fJ5dC3V1PrKKrUEOEE30jrOKA=",
"rev": "ed4a395ea001367c1f13d34b1e01aa10290f67d6",
"revCount": 737298,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.737298%2Brev-ed4a395ea001367c1f13d34b1e01aa10290f67d6/01945f5f-4175-7e72-8809-a1e482c4a443/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/NixOS/nixpkgs/0.1.%2A.tar.gz"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1736735482,
"narHash": "sha256-QOA4jCDyyUM9Y2Vba+HSZ/5LdtCMGaTE/7NkkUzBr50=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "cf960a1938ee91200fe0d2f7b2582fde2429d562",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

58
flake.nix Normal file
View file

@ -0,0 +1,58 @@
# Nix flake providing a reproducible Rust development shell for this project.
{
  description = "A Nix-flake-based Rust development environment";

  inputs = {
    nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1.*.tar.gz";
    rust-overlay = {
      url = "github:oxalica/rust-overlay";
      inputs.nixpkgs.follows = "nixpkgs";
    };
  };

  outputs = { self, nixpkgs, rust-overlay }:
    let
      supportedSystems = [ "x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ];
      # Builds an attribute set keyed by system, with pkgs pre-configured
      # with the rust-overlay and this flake's own overlay.
      forEachSupportedSystem = f: nixpkgs.lib.genAttrs supportedSystems (system: f {
        pkgs = import nixpkgs {
          inherit system;
          overlays = [ rust-overlay.overlays.default self.overlays.default ];
        };
      });
    in
    {
      overlays.default = final: prev: {
        # Select the toolchain pinned by rust-toolchain.toml (or legacy
        # rust-toolchain) when present; otherwise fall back to the latest
        # stable toolchain with rust-src and rustfmt.
        rustToolchain =
          let
            rust = prev.rust-bin;
          in
          if builtins.pathExists ./rust-toolchain.toml then
            rust.fromRustupToolchainFile ./rust-toolchain.toml
          else if builtins.pathExists ./rust-toolchain then
            rust.fromRustupToolchainFile ./rust-toolchain
          else
            rust.stable.latest.default.override {
              extensions = [ "rust-src" "rustfmt" ];
            };
      };

      devShells = forEachSupportedSystem ({ pkgs }: {
        default = pkgs.mkShell {
          packages = with pkgs; [
            rustToolchain
            openssl
            pkg-config
            cargo-deny
            cargo-edit
            cargo-watch
            rust-analyzer
            samply
          ];
          env = {
            # Required by rust-analyzer
            RUST_SRC_PATH = "${pkgs.rustToolchain}/lib/rustlib/src/rust/library";
          };
        };
      });
    };
}

View file

@ -1,2 +0,0 @@
-- Revert the initial schema: drop both tables created by the up migration.
DROP TABLE directories;
DROP TABLE songs;

View file

@ -1,25 +0,0 @@
-- Initial schema: one row per indexed directory and one per song.
-- UNIQUE(path) ON CONFLICT REPLACE lets a re-scan upsert rows by path.
CREATE TABLE directories (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT,
	artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT,
	UNIQUE(path) ON CONFLICT REPLACE
);

CREATE TABLE songs (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT NOT NULL,
	track_number INTEGER,
	disc_number INTEGER,
	title TEXT,
	artist TEXT,
	album_artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT,
	UNIQUE(path) ON CONFLICT REPLACE
);

View file

@ -1,15 +0,0 @@
-- Drop the date_added column by rebuilding the table (SQLite cannot DROP COLUMN
-- directly in older versions): copy data aside, recreate, copy back.
CREATE TEMPORARY TABLE directories_backup(id, path, parent, artist, year, album, artwork);
INSERT INTO directories_backup SELECT id, path, parent, artist, year, album, artwork FROM directories;
DROP TABLE directories;
CREATE TABLE directories (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT,
	artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT,
	UNIQUE(path) ON CONFLICT REPLACE
);
INSERT INTO directories SELECT * FROM directories_backup;
DROP TABLE directories_backup;

View file

@ -1 +0,0 @@
-- Track when a directory first entered the index; existing rows default to 0.
ALTER TABLE directories ADD COLUMN date_added INTEGER DEFAULT 0 NOT NULL;

View file

@ -1 +0,0 @@
-- Revert: remove the users table.
DROP TABLE users;

View file

@ -1,8 +0,0 @@
-- User accounts with salted password hashes; names must be unique.
CREATE TABLE users (
	id INTEGER PRIMARY KEY NOT NULL,
	name TEXT NOT NULL,
	password_salt BLOB NOT NULL,
	password_hash BLOB NOT NULL,
	admin INTEGER NOT NULL,
	UNIQUE(name)
);

View file

@ -1 +0,0 @@
-- Revert: remove the misc_settings table.
DROP TABLE misc_settings;

View file

@ -1,7 +0,0 @@
-- Singleton settings row (id is constrained to 0) seeded with defaults;
-- auth_secret is initialized to 64 random bytes, hex-encoded.
CREATE TABLE misc_settings (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	auth_secret TEXT NOT NULL,
	index_sleep_duration_seconds INTEGER NOT NULL,
	index_album_art_pattern TEXT NOT NULL
);
INSERT INTO misc_settings (id, auth_secret, index_sleep_duration_seconds, index_album_art_pattern) VALUES (0, hex(randomblob(64)), 1800, "Folder.(jpeg|jpg|png)");

View file

@ -1 +0,0 @@
-- Revert: remove the ddns_config table.
DROP TABLE ddns_config;

View file

@ -1,8 +0,0 @@
-- Singleton dynamic-DNS configuration row (id constrained to 0), seeded empty.
CREATE TABLE ddns_config (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	host TEXT NOT NULL,
	username TEXT NOT NULL,
	password TEXT NOT NULL
);
INSERT INTO ddns_config (id, host, username, password) VALUES (0, "", "", "");

View file

@ -1 +0,0 @@
-- Revert: remove the mount_points table.
DROP TABLE mount_points;

View file

@ -1,6 +0,0 @@
-- Maps a real filesystem path (source) to a virtual mount name; names are unique.
CREATE TABLE mount_points (
	id INTEGER PRIMARY KEY NOT NULL,
	source TEXT NOT NULL,
	name TEXT NOT NULL,
	UNIQUE(name)
);

View file

@ -1,2 +0,0 @@
-- Revert: drop both playlist tables created by the up migration.
DROP TABLE playlists;
DROP TABLE playlist_songs;

View file

@ -1,16 +0,0 @@
-- Playlists are owned by a user (deleted with the user via ON DELETE CASCADE);
-- playlist_songs stores ordered entries, deleted with their playlist.
CREATE TABLE playlists (
	id INTEGER PRIMARY KEY NOT NULL,
	owner INTEGER NOT NULL,
	name TEXT NOT NULL,
	FOREIGN KEY(owner) REFERENCES users(id) ON DELETE CASCADE,
	UNIQUE(owner, name) ON CONFLICT REPLACE
);

CREATE TABLE playlist_songs (
	id INTEGER PRIMARY KEY NOT NULL,
	playlist INTEGER NOT NULL,
	path TEXT NOT NULL,
	ordering INTEGER NOT NULL,
	FOREIGN KEY(playlist) REFERENCES playlists(id) ON DELETE CASCADE ON UPDATE CASCADE,
	UNIQUE(playlist, ordering) ON CONFLICT REPLACE
);

View file

@ -1,11 +0,0 @@
-- Drop the prefix_url column by rebuilding misc_settings (table-rebuild pattern
-- for SQLite column removal).
CREATE TEMPORARY TABLE misc_settings_backup(id, auth_secret, index_sleep_duration_seconds, index_album_art_pattern);
INSERT INTO misc_settings_backup SELECT id, auth_secret, index_sleep_duration_seconds, index_album_art_pattern FROM misc_settings;
DROP TABLE misc_settings;
CREATE TABLE misc_settings (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	auth_secret TEXT NOT NULL,
	index_sleep_duration_seconds INTEGER NOT NULL,
	index_album_art_pattern TEXT NOT NULL
);
INSERT INTO misc_settings SELECT * FROM misc_settings_backup;
DROP TABLE misc_settings_backup;

View file

@ -1 +0,0 @@
-- URL prefix under which the server is reachable; empty by default.
ALTER TABLE misc_settings ADD COLUMN prefix_url TEXT NOT NULL DEFAULT "";

View file

@ -1,19 +0,0 @@
-- Drop the duration column by rebuilding the songs table (table-rebuild pattern
-- for SQLite column removal).
CREATE TEMPORARY TABLE songs_backup(id, path, parent, track_number, disc_number, title, artist, album_artist, year, album, artwork);
INSERT INTO songs_backup SELECT id, path, parent, track_number, disc_number, title, artist, album_artist, year, album, artwork FROM songs;
DROP TABLE songs;
CREATE TABLE songs (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT NOT NULL,
	track_number INTEGER,
	disc_number INTEGER,
	title TEXT,
	artist TEXT,
	album_artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT,
	UNIQUE(path) ON CONFLICT REPLACE
);
INSERT INTO songs SELECT * FROM songs_backup;
DROP TABLE songs_backup;

View file

@ -1 +0,0 @@
-- Song duration; nullable because it is filled in lazily during indexing.
ALTER TABLE songs ADD COLUMN duration INTEGER;

View file

@ -1,13 +0,0 @@
-- Drop the last.fm credential columns by rebuilding the users table
-- (table-rebuild pattern for SQLite column removal).
CREATE TEMPORARY TABLE users_backup(id, name, password_salt, password_hash, admin);
INSERT INTO users_backup SELECT id, name, password_salt, password_hash, admin FROM users;
DROP TABLE users;
CREATE TABLE users (
	id INTEGER PRIMARY KEY NOT NULL,
	name TEXT NOT NULL,
	password_salt BLOB NOT NULL,
	password_hash BLOB NOT NULL,
	admin INTEGER NOT NULL,
	UNIQUE(name)
);
INSERT INTO users SELECT * FROM users_backup;
DROP TABLE users_backup;

View file

@ -1,2 +0,0 @@
-- Per-user last.fm credentials; nullable since linking an account is optional.
ALTER TABLE users ADD COLUMN lastfm_username TEXT;
ALTER TABLE users ADD COLUMN lastfm_session_key TEXT;

View file

@ -1,15 +0,0 @@
-- Rebuild misc_settings while intentionally discarding the old auth_secret;
-- a fresh hex-encoded random secret is generated via the column default.
CREATE TEMPORARY TABLE misc_settings_backup(id, index_sleep_duration_seconds, index_album_art_pattern, prefix_url);
INSERT INTO misc_settings_backup
SELECT id, index_sleep_duration_seconds, index_album_art_pattern, prefix_url
FROM misc_settings;
DROP TABLE misc_settings;
CREATE TABLE misc_settings (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	auth_secret BLOB NOT NULL DEFAULT (hex(randomblob(32))),
	index_sleep_duration_seconds INTEGER NOT NULL,
	index_album_art_pattern TEXT NOT NULL,
	prefix_url TEXT NOT NULL DEFAULT ""
);
INSERT INTO misc_settings(id, index_sleep_duration_seconds, index_album_art_pattern, prefix_url)
SELECT * FROM misc_settings_backup;
DROP TABLE misc_settings_backup;

View file

@ -1,15 +0,0 @@
-- Rebuild misc_settings so auth_secret is a raw 32-byte blob; the old secret
-- is intentionally discarded and regenerated via the column default.
CREATE TEMPORARY TABLE misc_settings_backup(id, index_sleep_duration_seconds, index_album_art_pattern, prefix_url);
INSERT INTO misc_settings_backup
SELECT id, index_sleep_duration_seconds, index_album_art_pattern, prefix_url
FROM misc_settings;
DROP TABLE misc_settings;
CREATE TABLE misc_settings (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	auth_secret BLOB NOT NULL DEFAULT (randomblob(32)),
	index_sleep_duration_seconds INTEGER NOT NULL,
	index_album_art_pattern TEXT NOT NULL,
	prefix_url TEXT NOT NULL DEFAULT ""
);
INSERT INTO misc_settings(id, index_sleep_duration_seconds, index_album_art_pattern, prefix_url)
SELECT * FROM misc_settings_backup;
DROP TABLE misc_settings_backup;

View file

@ -1,11 +0,0 @@
-- Recreate users with the legacy salt+hash columns. Note: existing accounts
-- are dropped (no data migration back to the old credential format).
DROP TABLE users;
CREATE TABLE users (
	id INTEGER PRIMARY KEY NOT NULL,
	name TEXT NOT NULL,
	password_salt BLOB NOT NULL,
	password_hash BLOB NOT NULL,
	admin INTEGER NOT NULL,
	lastfm_username TEXT,
	lastfm_session_key TEXT,
	UNIQUE(name)
);

View file

@ -1,10 +0,0 @@
-- Switch to a single text password_hash column (PHC-style string embedding the
-- salt). Note: existing accounts are dropped rather than re-hashed.
DROP TABLE users;
CREATE TABLE users (
	id INTEGER PRIMARY KEY NOT NULL,
	name TEXT NOT NULL,
	password_hash TEXT NOT NULL,
	admin INTEGER NOT NULL,
	lastfm_username TEXT,
	lastfm_session_key TEXT,
	UNIQUE(name)
);

View file

@ -1,14 +0,0 @@
-- Drop the web theme columns by rebuilding the users table (table-rebuild
-- pattern for SQLite column removal); all other user data is preserved.
CREATE TEMPORARY TABLE users_backup(id, name, password_hash, admin, lastfm_username, lastfm_session_key);
INSERT INTO users_backup SELECT id, name, password_hash, admin, lastfm_username, lastfm_session_key FROM users;
DROP TABLE users;
CREATE TABLE users (
	id INTEGER PRIMARY KEY NOT NULL,
	name TEXT NOT NULL,
	password_hash TEXT NOT NULL,
	admin INTEGER NOT NULL,
	lastfm_username TEXT,
	lastfm_session_key TEXT,
	UNIQUE(name)
);
INSERT INTO users SELECT * FROM users_backup;
DROP TABLE users_backup;

View file

@ -1,2 +0,0 @@
-- Per-user web UI theme preferences; nullable so unset means "use default".
ALTER TABLE users ADD COLUMN web_theme_base TEXT;
ALTER TABLE users ADD COLUMN web_theme_accent TEXT;

View file

@ -1 +0,0 @@
-- Restore the prefix_url column removed by the up migration.
ALTER TABLE misc_settings ADD COLUMN prefix_url TEXT NOT NULL DEFAULT "";

View file

@ -1,11 +0,0 @@
-- Remove the prefix_url column by rebuilding misc_settings (table-rebuild
-- pattern for SQLite column removal).
CREATE TEMPORARY TABLE misc_settings_backup(id, auth_secret, index_sleep_duration_seconds, index_album_art_pattern);
INSERT INTO misc_settings_backup SELECT id, auth_secret, index_sleep_duration_seconds, index_album_art_pattern FROM misc_settings;
DROP TABLE misc_settings;
CREATE TABLE misc_settings (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	auth_secret BLOB NOT NULL DEFAULT (randomblob(32)),
	index_sleep_duration_seconds INTEGER NOT NULL,
	index_album_art_pattern TEXT NOT NULL
);
INSERT INTO misc_settings SELECT * FROM misc_settings_backup;
DROP TABLE misc_settings_backup;

View file

@ -1,20 +0,0 @@
-- Drop the lyricist/composer/genre/label columns by rebuilding the songs table
-- (table-rebuild pattern for SQLite column removal).
CREATE TEMPORARY TABLE songs_backup(id, path, parent, track_number, disc_number, title, artist, album_artist, year, album, artwork, duration);
INSERT INTO songs_backup SELECT id, path, parent, track_number, disc_number, title, artist, album_artist, year, album, artwork, duration FROM songs;
DROP TABLE songs;
CREATE TABLE songs (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT NOT NULL,
	track_number INTEGER,
	disc_number INTEGER,
	title TEXT,
	artist TEXT,
	album_artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT,
	duration INTEGER,
	UNIQUE(path) ON CONFLICT REPLACE
);
INSERT INTO songs SELECT * FROM songs_backup;
DROP TABLE songs_backup;

View file

@ -1,4 +0,0 @@
-- Additional song metadata tags; all nullable since not every file has them.
ALTER TABLE songs ADD COLUMN lyricist TEXT;
ALTER TABLE songs ADD COLUMN composer TEXT;
ALTER TABLE songs ADD COLUMN genre TEXT;
ALTER TABLE songs ADD COLUMN label TEXT;

Binary file not shown.

After

(image error) Size: 1.2 MiB

Binary file not shown.

After

(image error) Size: 1.3 MiB

Binary file not shown.

Before

(image error) Size: 107 KiB

After

(image error) Size: 723 KiB

Binary file not shown.

Before

(image error) Size: 256 KiB

After

(image error) Size: 722 KiB

View file

@ -7,21 +7,25 @@ EXEC_PREFIX ?= $(PREFIX)
BINDIR ?= $(EXEC_PREFIX)/bin
DATAROOTDIR ?= $(PREFIX)/share
DATADIR ?= $(DATAROOTDIR)
SYSCONFDIR ?= $(PREFIX)/etc
LOCALSTATEDIR ?= $(PREFIX)/var
RUNSTATEDIR ?= $(LOCALSTATEDIR)/run
%-system: POLARIS_BIN_PATH := $(BINDIR)/polaris
%-system: export POLARIS_WEB_DIR := $(DATADIR)/polaris/web
%-system: export POLARIS_SWAGGER_DIR := $(DATADIR)/polaris/swagger
%-system: export POLARIS_CONFIG_DIR := $(SYSCONFDIR)/polaris
%-system: export POLARIS_DATA_DIR := $(LOCALSTATEDIR)/lib/polaris
%-system: export POLARIS_DB_DIR := $(LOCALSTATEDIR)/lib/polaris
%-system: export POLARIS_LOG_DIR := $(LOCALSTATEDIR)/log/polaris
%-system: export POLARIS_CACHE_DIR := $(LOCALSTATEDIR)/cache/polaris
%-system: export POLARIS_PID_DIR := $(RUNSTATEDIR)/polaris
XDG_CACHE_HOME ?= $(HOME)/.cache
XDG_CONFIG_HOME ?= $(HOME)/.config
XDG_DATA_HOME ?= $(HOME)/.local/share
XDG_BINDIR ?= $(HOME)/.local/bin
XDG_DATADIR ?= $(XDG_DATA_HOME)/polaris
XDG_CACHEDIR ?= $(XDG_CACHE_HOME)/polaris
XDG_CONFIGDIR ?= $(XDG_CONFIG_HOME)/polaris
ifdef $(XDG_RUNTIME_DIR)
XDG_PIDDIR ?= $(XDG_RUNTIME_DIR)/polaris
else
@ -29,7 +33,8 @@ XDG_PIDDIR ?= /tmp/polaris-$(UID)
endif
%-xdg: POLARIS_BIN_PATH := $(XDG_BINDIR)/polaris
%-xdg: export POLARIS_WEB_DIR := $(XDG_DATADIR)/web
%-xdg: export POLARIS_SWAGGER_DIR := $(XDG_DATADIR)/swagger
%-xdg: export POLARIS_CONFIG_DIR := $(XDG_CONFIGDIR)
%-xdg: export POLARIS_DATA_DIR := $(XDG_DATADIR)
%-xdg: export POLARIS_DB_DIR := $(XDG_DATADIR)
%-xdg: export POLARIS_LOG_DIR := $(XDG_CACHEDIR)
%-xdg: export POLARIS_CACHE_DIR := $(XDG_CACHEDIR)
@ -57,7 +62,8 @@ preview: preview-system
list-paths:
$(info POLARIS_BIN_PATH is $(POLARIS_BIN_PATH))
$(info POLARIS_WEB_DIR is $(POLARIS_WEB_DIR))
$(info POLARIS_SWAGGER_DIR is $(POLARIS_SWAGGER_DIR))
$(info POLARIS_CONFIG_DIR is $(POLARIS_CONFIG_DIR))
$(info POLARIS_DATA_DIR is $(POLARIS_DATA_DIR))
$(info POLARIS_DB_DIR is $(POLARIS_DB_DIR))
$(info POLARIS_LOG_DIR is $(POLARIS_LOG_DIR))
$(info POLARIS_CACHE_DIR is $(POLARIS_CACHE_DIR))
@ -74,9 +80,7 @@ install-bin: cargo-build
install-data:
install -d $(POLARIS_WEB_DIR)
install -d $(POLARIS_SWAGGER_DIR)
cp -rT ./web $(POLARIS_WEB_DIR)
cp -rT ./swagger $(POLARIS_SWAGGER_DIR)
# Uninstall
@ -89,7 +93,8 @@ uninstall-bin:
uninstall-data:
rm -rf $(POLARIS_WEB_DIR)
rm -rf $(POLARIS_SWAGGER_DIR)
rm -rf $(POLARIS_CONFIG_DIR)
rm -rf $(POLARIS_DATA_DIR)
rm -rf $(POLARIS_DB_DIR)
rm -rf $(POLARIS_LOG_DIR)
rm -rf $(POLARIS_CACHE_DIR)

View file

@ -3,7 +3,7 @@ echo "Creating output directory"
mkdir -p release/tmp/polaris
echo "Copying package files"
cp -r web docs/swagger src migrations test-data build.rs Cargo.toml Cargo.lock rust-toolchain res/unix/Makefile release/tmp/polaris
cp -r web src test-data build.rs Cargo.toml Cargo.lock rust-toolchain.toml res/unix/Makefile release/tmp/polaris
echo "Creating tarball"
tar -zc -C release/tmp -f release/polaris.tar.gz polaris

View file

@ -49,7 +49,6 @@
<ComponentRef Id="ProgramMenuDir" />
<ComponentRef Id="CleanupExtraData" />
<ComponentGroupRef Id="WebUI" />
<ComponentGroupRef Id="SwaggerUI" />
</Feature>
<Icon Id="polaris.exe" SourceFile="polaris.exe" />
<Property Id="ARPPRODUCTICON" Value="polaris.exe" />

View file

@ -8,7 +8,6 @@ if (!(Test-Path env:POLARIS_VERSION)) {
# And remove the code setting these as defaults in `service/mod.rs`
# $script:INSTALL_DIR = "%LOCALAPPDATA%\Permafrost\Polaris"
# $env:POLARIS_WEB_DIR = "$INSTALL_DIR\web"
# $env:POLARIS_SWAGGER_DIR = "$INSTALL_DIR\swagger"
# $env:POLARIS_DB_DIR = "$INSTALL_DIR"
# $env:POLARIS_LOG_DIR = "$INSTALL_DIR"
# $env:POLARIS_CACHE_DIR = "$INSTALL_DIR"
@ -29,7 +28,6 @@ Copy-Item .\res\windows\installer\dialog.bmp .\release\tmp\
Copy-Item .\target\release\polaris.exe .\release\tmp\
Copy-Item .\target\release\polaris-cli.exe .\release\tmp\
Copy-Item .\web .\release\tmp\web -recurse
Copy-Item .\docs\swagger .\release\tmp\swagger -recurse
""
"Inserting version number in installer config"
@ -41,15 +39,13 @@ $wxs.Save('.\res\windows\installer\installer.wxs')
"Creating installer"
$heat_exe = Join-Path $env:WIX bin\heat.exe
& $heat_exe dir .\release\tmp\web\ -ag -g1 -dr AppDataPolaris -cg WebUI -sfrag -var wix.WebUIDir -out .\release\tmp\web_ui_fragment.wxs
& $heat_exe dir .\release\tmp\swagger\ -ag -g1 -dr AppDataPolaris -cg SwaggerUI -sfrag -var wix.SwaggerUIDir -out .\release\tmp\swagger_ui_fragment.wxs
$candle_exe = Join-Path $env:WIX bin\candle.exe
& $candle_exe -wx -ext WixUtilExtension -arch x64 -out .\release\tmp\web_ui_fragment.wixobj .\release\tmp\web_ui_fragment.wxs
& $candle_exe -wx -ext WixUtilExtension -arch x64 -out .\release\tmp\swagger_ui_fragment.wixobj .\release\tmp\swagger_ui_fragment.wxs
& $candle_exe -wx -ext WixUtilExtension -arch x64 -out .\release\tmp\installer.wixobj .\res\windows\installer\installer.wxs
$light_exe = Join-Path $env:WIX bin\light.exe
& $light_exe -dWebUIDir=".\release\tmp\web" -dSwaggerUIDir=".\release\tmp\swagger" -wx -ext WixUtilExtension -ext WixUIExtension -spdb -sw1076 -sice:ICE38 -sice:ICE64 -out .\release\polaris.msi .\release\tmp\installer.wixobj .\release\tmp\web_ui_fragment.wixobj .\release\tmp\swagger_ui_fragment.wixobj
& $light_exe -dWebUIDir=".\release\tmp\web" -wx -ext WixUtilExtension -ext WixUIExtension -spdb -sw1076 -sice:ICE38 -sice:ICE64 -out .\release\polaris.msi .\release\tmp\installer.wixobj .\release\tmp\web_ui_fragment.wixobj
"Cleaning up"
Remove-Item -Recurse .\release\tmp

View file

@ -1 +0,0 @@
stable

4
rust-toolchain.toml Normal file
View file

@ -0,0 +1,4 @@
# Pins the Rust toolchain for rustup; also read by the Nix flake's overlay.
[toolchain]
channel = "stable"
components = [ "rust-src", "rustfmt" ]
profile = "default"

View file

@ -1,18 +1,26 @@
use std::fs;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use crate::db::{self, DB};
use log::info;
use rand::rngs::OsRng;
use rand::RngCore;
use tokio::fs::try_exists;
use tokio::task::spawn_blocking;
use crate::app::legacy::*;
use crate::paths::Paths;
pub mod auth;
pub mod config;
pub mod ddns;
pub mod formats;
pub mod index;
pub mod lastfm;
pub mod legacy;
pub mod ndb;
pub mod peaks;
pub mod playlist;
pub mod settings;
pub mod scanner;
pub mod thumbnail;
pub mod user;
pub mod vfs;
#[cfg(test)]
pub mod test;
@ -20,83 +28,290 @@ pub mod test;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error(transparent)]
Config(#[from] config::Error),
ThreadPoolBuilder(#[from] rayon::ThreadPoolBuildError),
#[error(transparent)]
Database(#[from] db::Error),
ThreadJoining(#[from] tokio::task::JoinError),
#[error("Filesystem error for `{0}`: `{1}`")]
Io(PathBuf, std::io::Error),
#[error(transparent)]
Settings(#[from] settings::Error),
FileWatch(#[from] notify::Error),
#[error(transparent)]
SQL(#[from] rusqlite::Error),
#[error(transparent)]
Ape(#[from] ape::Error),
#[error("ID3 error in `{0}`: `{1}`")]
Id3(PathBuf, id3::Error),
#[error("Metaflac error in `{0}`: `{1}`")]
Metaflac(PathBuf, metaflac::Error),
#[error("Mp4aMeta error in `{0}`: `{1}`")]
Mp4aMeta(PathBuf, mp4ameta::Error),
#[error(transparent)]
Opus(#[from] opus_headers::ParseError),
#[error(transparent)]
Vorbis(#[from] lewton::VorbisError),
#[error("Could not find a Vorbis comment within flac file")]
VorbisCommentNotFoundInFlacFile,
#[error("Could not read thumbnail image in `{0}`:\n\n{1}")]
Image(PathBuf, image::error::ImageError),
#[error("This file format is not supported: {0}")]
UnsupportedFormat(&'static str),
#[error("No tracks found in audio file: {0}")]
MediaEmpty(PathBuf),
#[error(transparent)]
MediaDecodeError(symphonia::core::errors::Error),
#[error(transparent)]
MediaDecoderError(symphonia::core::errors::Error),
#[error(transparent)]
MediaPacketError(symphonia::core::errors::Error),
#[error(transparent)]
MediaProbeError(symphonia::core::errors::Error),
#[error(transparent)]
PeaksSerialization(bitcode::Error),
#[error(transparent)]
PeaksDeserialization(bitcode::Error),
#[error(transparent)]
NativeDatabase(#[from] native_db::db_type::Error),
#[error("Could not initialize database")]
NativeDatabaseCreationError(native_db::db_type::Error),
#[error("DDNS update query failed with HTTP status code `{0}`")]
UpdateQueryFailed(u16),
#[error("DDNS update query failed due to a transport error")]
UpdateQueryTransport,
#[error("Auth secret does not have the expected format")]
AuthenticationSecretInvalid,
#[error("Missing auth secret")]
AuthenticationSecretNotFound,
#[error("Missing settings")]
MiscSettingsNotFound,
#[error("Index album art pattern is not a valid regex")]
IndexAlbumArtPatternInvalid,
#[error("DDNS update URL is invalid")]
DDNSUpdateURLInvalid,
#[error("Could not deserialize configuration: `{0}`")]
ConfigDeserialization(toml::de::Error),
#[error("Could not serialize configuration: `{0}`")]
ConfigSerialization(toml::ser::Error),
#[error("Could not deserialize collection")]
IndexDeserializationError,
#[error("Could not serialize collection")]
IndexSerializationError,
#[error("Invalid Directory")]
InvalidDirectory(String),
#[error("The following virtual path could not be mapped to a real path: `{0}`")]
CouldNotMapToRealPath(PathBuf),
#[error("The following real path could not be mapped to a virtual path: `{0}`")]
CouldNotMapToVirtualPath(PathBuf),
#[error("User not found")]
UserNotFound,
#[error("Directory not found: {0}")]
DirectoryNotFound(PathBuf),
#[error("Artist not found")]
ArtistNotFound,
#[error("Album not found")]
AlbumNotFound,
#[error("Genre not found")]
GenreNotFound,
#[error("Song not found")]
SongNotFound,
#[error("Invalid search query syntax")]
SearchQueryParseError,
#[error("Playlist not found")]
PlaylistNotFound,
#[error("No embedded artwork was found in `{0}`")]
EmbeddedArtworkNotFound(PathBuf),
#[error("Cannot use empty username")]
EmptyUsername,
#[error("Cannot use empty password")]
EmptyPassword,
#[error("Username already exists")]
DuplicateUsername,
#[error("Username does not exist")]
IncorrectUsername,
#[error("Password does not match username")]
IncorrectPassword,
#[error("Invalid auth token")]
InvalidAuthToken,
#[error("Incorrect authorization scope")]
IncorrectAuthorizationScope,
#[error("Failed to hash password")]
PasswordHashing,
#[error("Failed to encode authorization token")]
AuthorizationTokenEncoding,
#[error("Failed to encode Branca token")]
BrancaTokenEncoding,
}
#[derive(Clone)]
pub struct App {
pub port: u16,
pub auth_secret: settings::AuthSecret,
pub web_dir_path: PathBuf,
pub swagger_dir_path: PathBuf,
pub db: DB,
pub index: index::Index,
pub config_manager: config::Manager,
pub ddns_manager: ddns::Manager,
pub lastfm_manager: lastfm::Manager,
pub scanner: scanner::Scanner,
pub index_manager: index::Manager,
pub config_manager: config::Manager,
pub peaks_manager: peaks::Manager,
pub playlist_manager: playlist::Manager,
pub settings_manager: settings::Manager,
pub thumbnail_manager: thumbnail::Manager,
pub user_manager: user::Manager,
pub vfs_manager: vfs::Manager,
}
impl App {
pub fn new(port: u16, paths: Paths) -> Result<Self, Error> {
let db = DB::new(&paths.db_file_path)?;
pub async fn new(port: u16, paths: Paths) -> Result<Self, Error> {
fs::create_dir_all(&paths.data_dir_path)
.map_err(|e| Error::Io(paths.data_dir_path.clone(), e))?;
fs::create_dir_all(&paths.web_dir_path)
.map_err(|e| Error::Io(paths.web_dir_path.clone(), e))?;
fs::create_dir_all(&paths.swagger_dir_path)
.map_err(|e| Error::Io(paths.swagger_dir_path.clone(), e))?;
let peaks_dir_path = paths.cache_dir_path.join("peaks");
fs::create_dir_all(&peaks_dir_path).map_err(|e| Error::Io(peaks_dir_path.clone(), e))?;
let thumbnails_dir_path = paths.cache_dir_path.join("thumbnails");
fs::create_dir_all(&thumbnails_dir_path)
.map_err(|e| Error::Io(thumbnails_dir_path.clone(), e))?;
let vfs_manager = vfs::Manager::new(db.clone());
let settings_manager = settings::Manager::new(db.clone());
let auth_secret = settings_manager.get_auth_secret()?;
let ddns_manager = ddns::Manager::new(db.clone());
let user_manager = user::Manager::new(db.clone(), auth_secret);
let index = index::Index::new(db.clone(), vfs_manager.clone(), settings_manager.clone());
let config_manager = config::Manager::new(
settings_manager.clone(),
user_manager.clone(),
vfs_manager.clone(),
ddns_manager.clone(),
);
let playlist_manager = playlist::Manager::new(db.clone(), vfs_manager.clone());
let thumbnail_manager = thumbnail::Manager::new(thumbnails_dir_path);
let lastfm_manager = lastfm::Manager::new(index.clone(), user_manager.clone());
let auth_secret_file_path = paths.data_dir_path.join("auth.secret");
Self::migrate_legacy_auth_secret(&paths.db_file_path, &auth_secret_file_path).await?;
let auth_secret = Self::get_or_create_auth_secret(&auth_secret_file_path).await?;
if let Some(config_path) = paths.config_file_path {
let config = config::Config::from_path(&config_path)?;
config_manager.apply(&config)?;
let config_manager = config::Manager::new(&paths.config_file_path, auth_secret).await?;
let ddns_manager = ddns::Manager::new(config_manager.clone());
let ndb_manager = ndb::Manager::new(&paths.data_dir_path)?;
let index_manager = index::Manager::new(&paths.data_dir_path).await?;
let scanner = scanner::Scanner::new(index_manager.clone(), config_manager.clone()).await?;
let peaks_manager = peaks::Manager::new(peaks_dir_path);
let playlist_manager = playlist::Manager::new(ndb_manager);
let thumbnail_manager = thumbnail::Manager::new(thumbnails_dir_path);
let app = Self {
port,
web_dir_path: paths.web_dir_path,
ddns_manager,
scanner,
index_manager,
config_manager,
peaks_manager,
playlist_manager,
thumbnail_manager,
};
app.migrate_legacy_db(&paths.db_file_path).await?;
Ok(app)
}
async fn migrate_legacy_auth_secret(
db_file_path: &PathBuf,
secret_file_path: &PathBuf,
) -> Result<(), Error> {
if !try_exists(db_file_path)
.await
.map_err(|e| Error::Io(db_file_path.clone(), e))?
{
return Ok(());
}
let auth_secret = settings_manager.get_auth_secret()?;
if try_exists(secret_file_path)
.await
.map_err(|e| Error::Io(secret_file_path.clone(), e))?
{
return Ok(());
}
Ok(Self {
port,
auth_secret,
web_dir_path: paths.web_dir_path,
swagger_dir_path: paths.swagger_dir_path,
index,
config_manager,
ddns_manager,
lastfm_manager,
playlist_manager,
settings_manager,
thumbnail_manager,
user_manager,
vfs_manager,
db,
info!(
"Migrating auth secret from database at `{}`",
db_file_path.to_string_lossy()
);
let secret = spawn_blocking({
let db_file_path = db_file_path.clone();
move || read_legacy_auth_secret(&db_file_path)
})
.await??;
tokio::fs::write(secret_file_path, &secret)
.await
.map_err(|e| Error::Io(secret_file_path.clone(), e))?;
Ok(())
}
async fn migrate_legacy_db(&self, db_file_path: &PathBuf) -> Result<(), Error> {
if !try_exists(db_file_path)
.await
.map_err(|e| Error::Io(db_file_path.clone(), e))?
{
return Ok(());
}
let Some(config) = tokio::task::spawn_blocking({
let db_file_path = db_file_path.clone();
move || read_legacy_config(&db_file_path)
})
.await??
else {
return Ok(());
};
info!(
"Found usable config in legacy database at `{}`, beginning migration process",
db_file_path.to_string_lossy()
);
info!("Migrating configuration");
self.config_manager.apply_config(config).await?;
self.config_manager.save_config().await?;
info!("Migrating playlists");
for (name, owner, songs) in read_legacy_playlists(
db_file_path,
self.index_manager.clone(),
self.scanner.clone(),
)
.await?
{
self.playlist_manager
.save_playlist(&name, &owner, songs)
.await?;
}
info!(
"Deleting legacy database at `{}`",
db_file_path.to_string_lossy()
);
delete_legacy_db(db_file_path).await?;
info!(
"Completed migration from `{}`",
db_file_path.to_string_lossy()
);
Ok(())
}
async fn get_or_create_auth_secret(path: &Path) -> Result<auth::Secret, Error> {
match tokio::fs::read(&path).await {
Ok(s) => Ok(auth::Secret(
s.try_into()
.map_err(|_| Error::AuthenticationSecretInvalid)?,
)),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
let mut secret = auth::Secret::default();
OsRng.fill_bytes(secret.as_mut());
tokio::fs::write(&path, &secret)
.await
.map_err(|_| Error::AuthenticationSecretInvalid)?;
Ok(secret)
}
Err(e) => return Err(Error::Io(path.to_owned(), e)),
}
}
}

95
src/app/auth.rs Normal file
View file

@ -0,0 +1,95 @@
use std::time::{SystemTime, UNIX_EPOCH};
use pbkdf2::password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString};
use pbkdf2::Pbkdf2;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use crate::app::Error;
/// 32 bytes of key material used to sign and verify authentication tokens.
#[derive(Clone, Default)]
pub struct Secret(pub [u8; 32]);

impl AsRef<[u8]> for Secret {
	// Borrow the raw key bytes (e.g. to pass to branca encode/decode).
	fn as_ref(&self) -> &[u8] {
		&self.0
	}
}

impl AsMut<[u8]> for Secret {
	// Mutable access to the key bytes, used when filling the secret with randomness.
	fn as_mut(&mut self) -> &mut [u8] {
		&mut self.0
	}
}

/// An encoded authentication token, as exchanged with clients.
#[derive(Debug)]
pub struct Token(pub String);

/// What a token grants access to.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum Scope {
	PolarisAuth,
}

/// The payload carried inside an auth token: who it identifies and its scope.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct Authorization {
	pub username: String,
	pub scope: Scope,
}
pub fn hash_password(password: &str) -> Result<String, Error> {
if password.is_empty() {
return Err(Error::EmptyPassword);
}
let salt = SaltString::generate(&mut OsRng);
match Pbkdf2.hash_password(password.as_bytes(), &salt) {
Ok(h) => Ok(h.to_string()),
Err(_) => Err(Error::PasswordHashing),
}
}
/// Checks an attempted plaintext password against a stored PHC-format hash.
/// Returns `false` when the stored hash cannot be parsed.
pub fn verify_password(password_hash: &str, attempted_password: &str) -> bool {
	PasswordHash::new(password_hash)
		.map(|parsed| {
			Pbkdf2
				.verify_password(attempted_password.as_bytes(), &parsed)
				.is_ok()
		})
		.unwrap_or(false)
}
pub fn generate_auth_token(
authorization: &Authorization,
auth_secret: &Secret,
) -> Result<Token, Error> {
let serialized_authorization =
serde_json::to_string(&authorization).or(Err(Error::AuthorizationTokenEncoding))?;
branca::encode(
serialized_authorization.as_bytes(),
auth_secret.as_ref(),
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as u32,
)
.or(Err(Error::BrancaTokenEncoding))
.map(Token)
}
pub fn decode_auth_token(
auth_token: &Token,
scope: Scope,
auth_secret: &Secret,
) -> Result<Authorization, Error> {
let Token(data) = auth_token;
let ttl = match scope {
Scope::PolarisAuth => 0, // permanent
};
let authorization =
branca::decode(data, auth_secret.as_ref(), ttl).map_err(|_| Error::InvalidAuthToken)?;
let authorization: Authorization =
serde_json::from_slice(&authorization[..]).map_err(|_| Error::InvalidAuthToken)?;
if authorization.scope != scope {
return Err(Error::IncorrectAuthorizationScope);
}
Ok(authorization)
}

View file

@ -1,195 +1,338 @@
use serde::Deserialize;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::{
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use crate::app::{ddns, settings, user, vfs};
use log::{error, info};
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use notify_debouncer_full::{Debouncer, FileIdMap};
use regex::Regex;
use tokio::sync::{futures::Notified, Notify, RwLock};
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error(transparent)]
Ddns(#[from] ddns::Error),
#[error("Filesystem error for `{0}`: `{1}`")]
Io(PathBuf, std::io::Error),
#[error(transparent)]
Settings(#[from] settings::Error),
#[error(transparent)]
Toml(#[from] toml::de::Error),
#[error(transparent)]
User(#[from] user::Error),
#[error(transparent)]
Vfs(#[from] vfs::Error),
}
use crate::app::Error;
#[derive(Default, Deserialize)]
mod mounts;
pub mod storage;
mod user;
pub use mounts::*;
pub use user::*;
use super::auth;
#[derive(Debug, Clone, Default)]
pub struct Config {
pub settings: Option<settings::NewSettings>,
pub mount_dirs: Option<Vec<vfs::MountDir>>,
pub ydns: Option<ddns::Config>,
pub users: Option<Vec<user::NewUser>>,
pub album_art_pattern: Option<Regex>,
pub ddns_update_url: Option<http::Uri>,
pub mount_dirs: Vec<MountDir>,
pub users: Vec<User>,
}
impl Config {
pub fn from_path(path: &Path) -> Result<Config, Error> {
let mut config_file =
std::fs::File::open(path).map_err(|e| Error::Io(path.to_owned(), e))?;
let mut config_file_content = String::new();
config_file
.read_to_string(&mut config_file_content)
.map_err(|e| Error::Io(path.to_owned(), e))?;
let config = toml::de::from_str::<Self>(&config_file_content)?;
impl TryFrom<storage::Config> for Config {
type Error = Error;
fn try_from(c: storage::Config) -> Result<Self, Self::Error> {
let mut config = Config::default();
config.set_mounts(c.mount_dirs)?;
config.set_users(c.users)?;
config.album_art_pattern = match c.album_art_pattern.as_deref().map(Regex::new) {
Some(Ok(u)) => Some(u),
Some(Err(_)) => return Err(Error::IndexAlbumArtPatternInvalid),
None => None,
};
config.ddns_update_url = match c.ddns_update_url.map(http::Uri::try_from) {
Some(Ok(u)) => Some(u),
Some(Err(_)) => return Err(Error::DDNSUpdateURLInvalid),
None => None,
};
Ok(config)
}
}
impl From<Config> for storage::Config {
fn from(c: Config) -> Self {
Self {
album_art_pattern: c.album_art_pattern.map(|p| p.as_str().to_owned()),
mount_dirs: c.mount_dirs.into_iter().map(|d| d.into()).collect(),
ddns_update_url: c.ddns_update_url.map(|u| u.to_string()),
users: c.users.into_iter().map(|u| u.into()).collect(),
}
}
}
#[derive(Clone)]
pub struct Manager {
settings_manager: settings::Manager,
user_manager: user::Manager,
vfs_manager: vfs::Manager,
ddns_manager: ddns::Manager,
config_file_path: PathBuf,
config: Arc<RwLock<Config>>,
auth_secret: auth::Secret,
#[allow(dead_code)]
file_watcher: Arc<Debouncer<RecommendedWatcher, FileIdMap>>,
change_notify: Arc<Notify>,
}
impl Manager {
pub fn new(
settings_manager: settings::Manager,
user_manager: user::Manager,
vfs_manager: vfs::Manager,
ddns_manager: ddns::Manager,
) -> Self {
Self {
settings_manager,
user_manager,
vfs_manager,
ddns_manager,
pub async fn new(config_file_path: &Path, auth_secret: auth::Secret) -> Result<Self, Error> {
if let Some(parent) = config_file_path.parent() {
tokio::fs::create_dir_all(parent)
.await
.map_err(|e| Error::Io(parent.to_owned(), e))?;
}
match tokio::fs::File::create_new(config_file_path).await {
Ok(_) => (),
Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => (),
Err(e) => {
error!("Failed to create config file at {config_file_path:#?}: {e}");
return Err(Error::Io(config_file_path.to_owned(), e));
}
};
let notify = Arc::new(Notify::new());
let mut debouncer = notify_debouncer_full::new_debouncer(Duration::from_secs(1), None, {
let notify = notify.clone();
move |_| {
notify.notify_waiters();
}
})?;
debouncer
.watcher()
.watch(&config_file_path, RecursiveMode::NonRecursive)?;
let manager = Self {
config_file_path: config_file_path.to_owned(),
config: Arc::new(RwLock::new(Config::default())),
auth_secret,
file_watcher: Arc::new(debouncer),
change_notify: Arc::default(),
};
tokio::task::spawn({
let manager = manager.clone();
async move {
loop {
notify.notified().await;
if let Err(e) = manager.reload_config().await {
error!("Configuration error: {e}");
} else {
info!("Sucessfully applied configuration change");
}
}
}
});
manager.reload_config().await?;
Ok(manager)
}
pub fn apply(&self, config: &Config) -> Result<(), Error> {
if let Some(new_settings) = &config.settings {
self.settings_manager.amend(new_settings)?;
}
	/// Returns a future that resolves the next time the configuration changes.
	pub fn on_config_change(&self) -> Notified {
		self.change_notify.notified()
	}
if let Some(mount_dirs) = &config.mount_dirs {
self.vfs_manager.set_mount_dirs(mount_dirs)?;
}
	/// Re-reads the configuration file from disk and applies its contents.
	async fn reload_config(&self) -> Result<(), Error> {
		let config = Self::read_config(&self.config_file_path).await?;
		self.apply_config(config).await
	}
if let Some(ddns_config) = &config.ydns {
self.ddns_manager.set_config(ddns_config)?;
}
if let Some(ref users) = config.users {
let old_users: Vec<user::User> = self.user_manager.list()?;
// Delete users that are not in new list
for old_user in old_users
.iter()
.filter(|old_user| !users.iter().any(|u| u.name == old_user.name))
{
self.user_manager.delete(&old_user.name)?;
}
// Insert new users
for new_user in users
.iter()
.filter(|u| !old_users.iter().any(|old_user| old_user.name == u.name))
{
self.user_manager.create(new_user)?;
}
// Update users
for user in users {
self.user_manager.set_password(&user.name, &user.password)?;
self.user_manager.set_is_admin(&user.name, user.admin)?;
}
}
async fn read_config(config_file_path: &Path) -> Result<storage::Config, Error> {
let config_content = tokio::fs::read_to_string(config_file_path)
.await
.map_err(|e| Error::Io(config_file_path.to_owned(), e))?;
toml::de::from_str::<storage::Config>(&config_content).map_err(Error::ConfigDeserialization)
}
pub async fn save_config(&self) -> Result<(), Error> {
let serialized = toml::ser::to_string_pretty::<storage::Config>(
&self.config.read().await.clone().into(),
)
.map_err(Error::ConfigSerialization)?;
tokio::fs::write(&self.config_file_path, serialized.as_bytes())
.await
.map_err(|e| Error::Io(self.config_file_path.clone(), e))?;
Ok(())
}
	/// Replaces the in-memory configuration with `new_config` and notifies
	/// subscribers. Does not write back to disk (used when applying external
	/// file changes).
	pub async fn apply_config(&self, new_config: storage::Config) -> Result<(), Error> {
		let mut config = self.config.write().await;
		*config = new_config.try_into()?;
		self.change_notify.notify_waiters();
		Ok(())
	}

	/// Applies an infallible mutation to the configuration, then notifies
	/// subscribers and persists the result.
	async fn mutate<F: FnOnce(&mut Config)>(&self, op: F) -> Result<(), Error> {
		self.mutate_fallible(|c| {
			op(c);
			Ok(())
		})
		.await
	}

	/// Applies a fallible mutation to the configuration; on success, notifies
	/// subscribers and saves the configuration to disk.
	async fn mutate_fallible<F: FnOnce(&mut Config) -> Result<(), Error>>(
		&self,
		op: F,
	) -> Result<(), Error> {
		{
			// Scope the write lock so it is released before saving.
			let mut config = self.config.write().await;
			op(&mut config)?;
		}
		self.change_notify.notify_waiters();
		self.save_config().await?;
		Ok(())
	}
pub async fn get_index_album_art_pattern(&self) -> Regex {
let config = self.config.read().await;
let pattern = config.album_art_pattern.clone();
pattern.unwrap_or_else(|| Regex::new("Folder.(jpeg|jpg|png)").unwrap())
}
	/// Sets the regex used to identify album art files and persists the change.
	pub async fn set_index_album_art_pattern(&self, regex: Regex) -> Result<(), Error> {
		self.mutate(|c| {
			c.album_art_pattern = Some(regex);
		})
		.await
	}

	/// Returns the configured dynamic DNS update URL, if any.
	pub async fn get_ddns_update_url(&self) -> Option<http::Uri> {
		self.config.read().await.ddns_update_url.clone()
	}

	/// Sets (or clears) the dynamic DNS update URL and persists the change.
	pub async fn set_ddns_update_url(&self, url: Option<http::Uri>) -> Result<(), Error> {
		self.mutate(|c| {
			c.ddns_update_url = url;
		})
		.await
	}
pub async fn get_users(&self) -> Vec<User> {
self.config.read().await.users.iter().cloned().collect()
}
pub async fn get_user(&self, username: &str) -> Result<User, Error> {
let config = self.config.read().await;
config
.get_user(username)
.cloned()
.ok_or(Error::UserNotFound)
}
	/// Creates a new user account and persists the configuration.
	pub async fn create_user(
		&self,
		username: &str,
		password: &str,
		admin: bool,
	) -> Result<(), Error> {
		self.mutate_fallible(|c| c.create_user(username, password, admin))
			.await
	}

	/// Verifies credentials and returns an auth token on success.
	pub async fn login(&self, username: &str, password: &str) -> Result<auth::Token, Error> {
		let config = self.config.read().await;
		config.login(username, password, &self.auth_secret)
	}

	/// Grants or revokes admin rights and persists the configuration.
	pub async fn set_is_admin(&self, username: &str, is_admin: bool) -> Result<(), Error> {
		self.mutate_fallible(|c| c.set_is_admin(username, is_admin))
			.await
	}

	/// Updates a user's password and persists the configuration.
	pub async fn set_password(&self, username: &str, password: &str) -> Result<(), Error> {
		self.mutate_fallible(|c| c.set_password(username, password))
			.await
	}

	/// Validates an auth token against the configured users.
	pub async fn authenticate(
		&self,
		auth_token: &auth::Token,
		scope: auth::Scope,
	) -> Result<auth::Authorization, Error> {
		let config = self.config.read().await;
		config.authenticate(auth_token, scope, &self.auth_secret)
	}

	/// Deletes a user account and persists the configuration.
	pub async fn delete_user(&self, username: &str) -> Result<(), Error> {
		self.mutate(|c| c.delete_user(username)).await
	}
pub async fn get_mounts(&self) -> Vec<MountDir> {
let config = self.config.read().await;
config.mount_dirs.iter().cloned().collect()
}
pub async fn resolve_virtual_path<P: AsRef<Path>>(
&self,
virtual_path: P,
) -> Result<PathBuf, Error> {
let config = self.config.read().await;
config.resolve_virtual_path(virtual_path)
}
pub async fn set_mounts(&self, mount_dirs: Vec<storage::MountDir>) -> Result<(), Error> {
self.mutate_fallible(|c| c.set_mounts(mount_dirs)).await
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::app::test;
use crate::test_name;
#[test]
fn apply_saves_misc_settings() {
let ctx = test::ContextBuilder::new(test_name!()).build();
let new_config = Config {
settings: Some(settings::NewSettings {
album_art_pattern: Some("🖼️\\.jpg".into()),
reindex_every_n_seconds: Some(100),
}),
..Default::default()
};
use super::*;
#[tokio::test]
async fn blank_config_round_trip() {
let config_path = PathBuf::from_iter(["test-data", "blank.toml"]);
let manager = Manager::new(&config_path, auth::Secret([0; 32]))
.await
.unwrap();
let config: storage::Config = manager.config.read().await.clone().into();
assert_eq!(config, storage::Config::default());
}
#[tokio::test]
async fn can_read_config() {
let config_path = PathBuf::from_iter(["test-data", "config.toml"]);
let manager = Manager::new(&config_path, auth::Secret([0; 32]))
.await
.unwrap();
let config: storage::Config = manager.config.read().await.clone().into();
ctx.config_manager.apply(&new_config).unwrap();
let settings = ctx.settings_manager.read().unwrap();
let new_settings = new_config.settings.unwrap();
assert_eq!(
settings.index_album_art_pattern,
new_settings.album_art_pattern.unwrap()
config.album_art_pattern,
Some(r#"^Folder\.(png|jpg|jpeg)$"#.to_owned())
);
assert_eq!(
settings.index_sleep_duration_seconds,
new_settings.reindex_every_n_seconds.unwrap()
config.mount_dirs,
vec![storage::MountDir {
source: PathBuf::from("test-data/small-collection"),
name: "root".to_owned(),
}]
);
assert_eq!(config.users[0].name, "test_user");
assert_eq!(config.users[0].admin, Some(true));
assert_eq!(
config.users[0].initial_password,
Some("very_secret_password".to_owned())
);
assert!(config.users[0].hashed_password.is_some());
}
#[test]
fn apply_saves_mount_points() {
let ctx = test::ContextBuilder::new(test_name!()).build();
#[tokio::test]
async fn can_write_config() {
let ctx = test::ContextBuilder::new(test_name!()).build().await;
ctx.config_manager
.create_user("Walter", "example_password", false)
.await
.unwrap();
let new_config = Config {
mount_dirs: Some(vec![vfs::MountDir {
source: "/home/music".into(),
name: "🎵📁".into(),
}]),
..Default::default()
};
ctx.config_manager.apply(&new_config).unwrap();
let actual_mount_dirs: Vec<vfs::MountDir> = ctx.vfs_manager.mount_dirs().unwrap();
assert_eq!(actual_mount_dirs, new_config.mount_dirs.unwrap());
}
#[test]
fn apply_saves_ddns_settings() {
let ctx = test::ContextBuilder::new(test_name!()).build();
let new_config = Config {
ydns: Some(ddns::Config {
host: "🐸🐸🐸.ydns.eu".into(),
username: "kfr🐸g".into(),
password: "tasty🐞".into(),
}),
..Default::default()
};
ctx.config_manager.apply(&new_config).unwrap();
let actual_ddns = ctx.ddns_manager.config().unwrap();
assert_eq!(actual_ddns, new_config.ydns.unwrap());
}
#[test]
fn apply_can_toggle_admin() {
let ctx = test::ContextBuilder::new(test_name!())
.user("Walter", "Tasty🍖", true)
.build();
assert!(ctx.user_manager.list().unwrap()[0].is_admin());
let new_config = Config {
users: Some(vec![user::NewUser {
name: "Walter".into(),
password: "Tasty🍖".into(),
admin: false,
}]),
..Default::default()
};
ctx.config_manager.apply(&new_config).unwrap();
assert!(!ctx.user_manager.list().unwrap()[0].is_admin());
let manager = Manager::new(&ctx.config_manager.config_file_path, auth::Secret([0; 32]))
.await
.unwrap();
assert!(manager.get_user("Walter").await.is_ok());
}
}

149
src/app/config/mounts.rs Normal file
View file

@ -0,0 +1,149 @@
use std::{
ops::Deref,
path::{Path, PathBuf},
};
use regex::Regex;
use crate::app::Error;
use super::storage;
use super::Config;
/// A filesystem directory exposed to clients under a virtual name.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct MountDir {
	pub source: PathBuf,
	pub name: String,
}

impl TryFrom<storage::MountDir> for MountDir {
	type Error = Error;

	fn try_from(mount_dir: storage::MountDir) -> Result<Self, Self::Error> {
		// TODO validation
		Ok(Self {
			// Normalize separators so configs written on one OS work on another.
			source: sanitize_path(&mount_dir.source),
			name: mount_dir.name,
		})
	}
}
impl From<MountDir> for storage::MountDir {
fn from(m: MountDir) -> Self {
Self {
source: m.source,
name: m.name,
}
}
}
impl Config {
pub fn set_mounts(&mut self, mount_dirs: Vec<storage::MountDir>) -> Result<(), Error> {
let mut new_mount_dirs = Vec::new();
for mount_dir in mount_dirs {
let mount_dir = <storage::MountDir as TryInto<MountDir>>::try_into(mount_dir)?;
new_mount_dirs.push(mount_dir);
}
new_mount_dirs.dedup_by(|a, b| a.name == b.name);
self.mount_dirs = new_mount_dirs;
Ok(())
}
pub fn resolve_virtual_path<P: AsRef<Path>>(&self, virtual_path: P) -> Result<PathBuf, Error> {
for mount in &self.mount_dirs {
if let Ok(p) = virtual_path.as_ref().strip_prefix(&mount.name) {
return if p.components().count() == 0 {
Ok(mount.source.clone())
} else {
Ok(mount.source.join(p))
};
}
}
Err(Error::CouldNotMapToRealPath(virtual_path.as_ref().into()))
}
}
/// Normalizes a path read from the configuration file by replacing both `\`
/// and `/` separators with this platform's separator, so that configuration
/// files are portable across operating systems.
fn sanitize_path(source: &PathBuf) -> PathBuf {
	// A plain character scan replaces the previous regex-based implementation,
	// which recompiled its regex on every call.
	let normalized: String = source
		.to_string_lossy()
		.chars()
		.map(|c| {
			if c == '\\' || c == '/' {
				std::path::MAIN_SEPARATOR
			} else {
				c
			}
		})
		.collect();
	PathBuf::from(normalized)
}
#[cfg(test)]
mod test {
	use super::*;

	// Round-trips virtual paths through a single-mount config.
	#[test]
	fn can_resolve_virtual_paths() {
		let raw_config = storage::Config {
			mount_dirs: vec![storage::MountDir {
				name: "root".to_owned(),
				source: PathBuf::from("test_dir"),
			}],
			..Default::default()
		};
		let config: Config = raw_config.try_into().unwrap();
		let test_cases = vec![
			(vec!["root"], vec!["test_dir"]),
			(
				vec!["root", "somewhere", "something.png"],
				vec!["test_dir", "somewhere", "something.png"],
			),
		];
		for (r#virtual, real) in test_cases {
			let real_path: PathBuf = real.iter().collect();
			let virtual_path: PathBuf = r#virtual.iter().collect();
			let converted_path = config.resolve_virtual_path(&virtual_path).unwrap();
			assert_eq!(converted_path, real_path);
		}
	}

	// Mixed and repeated separators should normalize to the platform separator.
	#[test]
	fn sanitizes_paths() {
		let mut correct_path = PathBuf::new();
		if cfg!(target_os = "windows") {
			correct_path.push("C:\\");
		} else {
			correct_path.push("/usr");
		}
		correct_path.push("some");
		correct_path.push("path");

		let tests = if cfg!(target_os = "windows") {
			vec![
				r#"C:/some/path"#,
				r#"C:\some\path"#,
				r#"C:\some\path\"#,
				r#"C:\some\path\\\\"#,
				r#"C:\some/path//"#,
			]
		} else {
			vec![
				r#"/usr/some/path"#,
				r#"/usr\some\path"#,
				r#"/usr\some\path\"#,
				r#"/usr\some\path\\\\"#,
				r#"/usr\some/path//"#,
			]
		};

		for test in tests {
			let raw_config = storage::Config {
				mount_dirs: vec![storage::MountDir {
					name: "root".to_owned(),
					source: PathBuf::from(test),
				}],
				..Default::default()
			};
			let config: Config = raw_config.try_into().unwrap();
			let converted_path = config.resolve_virtual_path(&PathBuf::from("root")).unwrap();
			assert_eq!(converted_path, correct_path);
		}
	}
}

32
src/app/config/storage.rs Normal file
View file

@ -0,0 +1,32 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
/// On-disk (TOML) representation of a user account.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct User {
	pub name: String,
	// `None` is treated as non-admin by the in-memory representation.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub admin: Option<bool>,
	// Plaintext password used to derive `hashed_password` when no hash exists.
	#[serde(skip_serializing_if = "Option::is_none")]
	pub initial_password: Option<String>,
	#[serde(skip_serializing_if = "Option::is_none")]
	pub hashed_password: Option<String>,
}

/// On-disk (TOML) representation of a mounted music directory.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct MountDir {
	pub source: PathBuf,
	pub name: String,
}

/// On-disk (TOML) representation of the whole server configuration.
/// Optional/empty fields are omitted when serializing.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct Config {
	#[serde(skip_serializing_if = "Option::is_none")]
	pub album_art_pattern: Option<String>,
	#[serde(default, skip_serializing_if = "Vec::is_empty")]
	pub mount_dirs: Vec<MountDir>,
	#[serde(skip_serializing_if = "Option::is_none")]
	pub ddns_update_url: Option<String>,
	#[serde(default, skip_serializing_if = "Vec::is_empty")]
	pub users: Vec<User>,
}

308
src/app/config/user.rs Normal file
View file

@ -0,0 +1,308 @@
use crate::app::{auth, Error};
use super::storage;
use super::Config;
/// In-memory representation of a user account.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct User {
	pub name: String,
	pub admin: Option<bool>,
	// NOTE(review): the plaintext initial password is retained after hashing
	// and written back to the config file — confirm this is intentional.
	pub initial_password: Option<String>,
	pub hashed_password: String,
}

impl User {
	/// A user is an admin only when explicitly marked as such (`Some(true)`).
	pub fn is_admin(&self) -> bool {
		self.admin == Some(true)
	}
}
impl TryFrom<storage::User> for User {
type Error = Error;
fn try_from(user: storage::User) -> Result<Self, Self::Error> {
let hashed_password = match (&user.initial_password, &user.hashed_password) {
(_, Some(p)) => p.clone(),
(Some(p), None) => auth::hash_password(p)?,
(None, None) => return Err(Error::EmptyPassword),
};
Ok(Self {
name: user.name,
admin: user.admin,
initial_password: user.initial_password,
hashed_password,
})
}
}
impl From<User> for storage::User {
fn from(user: User) -> Self {
Self {
name: user.name,
admin: user.admin,
initial_password: user.initial_password,
hashed_password: Some(user.hashed_password),
}
}
}
impl Config {
pub fn set_users(&mut self, users: Vec<storage::User>) -> Result<(), Error> {
let mut new_users = Vec::new();
for user in users {
let user = <storage::User as TryInto<User>>::try_into(user)?;
new_users.push(user);
}
new_users.dedup_by(|a, b| a.name == b.name);
self.users = new_users;
Ok(())
}
pub fn create_user(
&mut self,
username: &str,
password: &str,
admin: bool,
) -> Result<(), Error> {
if username.is_empty() {
return Err(Error::EmptyUsername);
}
if self.exists(username) {
return Err(Error::DuplicateUsername);
}
let password_hash = auth::hash_password(&password)?;
self.users.push(User {
name: username.to_owned(),
admin: Some(admin),
initial_password: None,
hashed_password: password_hash,
});
Ok(())
}
pub fn exists(&self, username: &str) -> bool {
self.users.iter().any(|u| u.name == username)
}
pub fn get_user(&self, username: &str) -> Option<&User> {
self.users.iter().find(|u| u.name == username)
}
pub fn get_user_mut(&mut self, username: &str) -> Option<&mut User> {
self.users.iter_mut().find(|u| u.name == username)
}
pub fn authenticate(
&self,
auth_token: &auth::Token,
scope: auth::Scope,
auth_secret: &auth::Secret,
) -> Result<auth::Authorization, Error> {
let authorization = auth::decode_auth_token(auth_token, scope, auth_secret)?;
if self.exists(&authorization.username) {
Ok(authorization)
} else {
Err(Error::IncorrectUsername)
}
}
pub fn login(
&self,
username: &str,
password: &str,
auth_secret: &auth::Secret,
) -> Result<auth::Token, Error> {
let user = self.get_user(username).ok_or(Error::IncorrectUsername)?;
if auth::verify_password(&user.hashed_password, password) {
let authorization = auth::Authorization {
username: username.to_owned(),
scope: auth::Scope::PolarisAuth,
};
auth::generate_auth_token(&authorization, auth_secret)
} else {
Err(Error::IncorrectPassword)
}
}
pub fn set_is_admin(&mut self, username: &str, is_admin: bool) -> Result<(), Error> {
let user = self.get_user_mut(username).ok_or(Error::UserNotFound)?;
user.admin = Some(is_admin);
Ok(())
}
pub fn set_password(&mut self, username: &str, password: &str) -> Result<(), Error> {
let user = self.get_user_mut(username).ok_or(Error::UserNotFound)?;
user.hashed_password = auth::hash_password(password)?;
Ok(())
}
pub fn delete_user(&mut self, username: &str) {
self.users.retain(|u| u.name != username);
}
}
#[cfg(test)]
mod test {
	use crate::app::test;
	use crate::test_name;

	use super::*;

	const TEST_USERNAME: &str = "Walter";
	const TEST_PASSWORD: &str = "super_secret!";

	// A user declared with only an initial password should gain a hash.
	#[test]
	fn adds_password_hashes() {
		let user_in = storage::User {
			name: TEST_USERNAME.to_owned(),
			initial_password: Some(TEST_PASSWORD.to_owned()),
			..Default::default()
		};
		let user: User = user_in.try_into().unwrap();
		let user_out: storage::User = user.into();
		assert_eq!(user_out.name, TEST_USERNAME);
		assert_eq!(user_out.initial_password, Some(TEST_PASSWORD.to_owned()));
		assert!(user_out.hashed_password.is_some());
	}

	// An existing hash must survive a round-trip unchanged.
	#[test]
	fn preserves_password_hashes() {
		let user_in = storage::User {
			name: TEST_USERNAME.to_owned(),
			hashed_password: Some("hash".to_owned()),
			..Default::default()
		};
		let user: User = user_in.clone().try_into().unwrap();
		let user_out: storage::User = user.into();
		assert_eq!(user_out, user_in);
	}

	#[tokio::test]
	async fn create_delete_user_golden_path() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		ctx.config_manager
			.create_user(TEST_USERNAME, TEST_PASSWORD, false)
			.await
			.unwrap();
		assert!(ctx.config_manager.get_user(TEST_USERNAME).await.is_ok());
		ctx.config_manager.delete_user(TEST_USERNAME).await.unwrap();
		assert!(ctx.config_manager.get_user(TEST_USERNAME).await.is_err());
	}

	#[tokio::test]
	async fn cannot_create_user_with_blank_username() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		let result = ctx.config_manager.create_user("", TEST_PASSWORD, false);
		assert!(matches!(result.await.unwrap_err(), Error::EmptyUsername));
	}

	#[tokio::test]
	async fn cannot_create_user_with_blank_password() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		let result = ctx.config_manager.create_user(TEST_USERNAME, "", false);
		assert!(matches!(result.await.unwrap_err(), Error::EmptyPassword));
	}

	#[tokio::test]
	async fn cannot_create_duplicate_user() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		let result = ctx
			.config_manager
			.create_user(TEST_USERNAME, TEST_PASSWORD, false);
		assert!(result.await.is_ok());
		let result = ctx
			.config_manager
			.create_user(TEST_USERNAME, TEST_PASSWORD, false);
		assert!(matches!(
			result.await.unwrap_err(),
			Error::DuplicateUsername
		));
	}

	#[tokio::test]
	async fn login_rejects_bad_password() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		ctx.config_manager
			.create_user(TEST_USERNAME, TEST_PASSWORD, false)
			.await
			.unwrap();
		let result = ctx.config_manager.login(TEST_USERNAME, "not the password");
		assert!(matches!(
			result.await.unwrap_err(),
			Error::IncorrectPassword
		));
	}

	#[tokio::test]
	async fn login_golden_path() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		ctx.config_manager
			.create_user(TEST_USERNAME, TEST_PASSWORD, false)
			.await
			.unwrap();
		let result = ctx.config_manager.login(TEST_USERNAME, TEST_PASSWORD);
		assert!(result.await.is_ok());
	}

	#[tokio::test]
	async fn authenticate_rejects_bad_token() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		ctx.config_manager
			.create_user(TEST_USERNAME, TEST_PASSWORD, false)
			.await
			.unwrap();
		let fake_token = auth::Token("fake token".to_owned());
		assert!(ctx
			.config_manager
			.authenticate(&fake_token, auth::Scope::PolarisAuth)
			.await
			.is_err())
	}

	// A token issued by login must decode back to the same authorization.
	#[tokio::test]
	async fn authenticate_golden_path() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		ctx.config_manager
			.create_user(TEST_USERNAME, TEST_PASSWORD, false)
			.await
			.unwrap();
		let token = ctx
			.config_manager
			.login(TEST_USERNAME, TEST_PASSWORD)
			.await
			.unwrap();
		let authorization = ctx
			.config_manager
			.authenticate(&token, auth::Scope::PolarisAuth)
			.await
			.unwrap();
		assert_eq!(
			authorization,
			auth::Authorization {
				username: TEST_USERNAME.to_owned(),
				scope: auth::Scope::PolarisAuth,
			}
		)
	}
}

View file

@ -1,59 +1,26 @@
use base64::prelude::*;
use diesel::prelude::*;
use log::{debug, error};
use serde::{Deserialize, Serialize};
use std::thread;
use std::time;
use std::time::Duration;
use crate::db::{self, ddns_config, DB};
const DDNS_UPDATE_URL: &str = "https://ydns.io/api/v1/update/";
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("DDNS update query failed with HTTP status code `{0}`")]
UpdateQueryFailed(u16),
#[error("DDNS update query failed due to a transport error")]
UpdateQueryTransport,
#[error(transparent)]
DatabaseConnection(#[from] db::Error),
#[error(transparent)]
Database(#[from] diesel::result::Error),
}
#[derive(Clone, Debug, Deserialize, Insertable, PartialEq, Eq, Queryable, Serialize)]
#[diesel(table_name = ddns_config)]
pub struct Config {
pub host: String,
pub username: String,
pub password: String,
}
use crate::app::{config, Error};
#[derive(Clone)]
pub struct Manager {
db: DB,
config_manager: config::Manager,
}
impl Manager {
pub fn new(db: DB) -> Self {
Self { db }
pub fn new(config_manager: config::Manager) -> Self {
Self { config_manager }
}
fn update_my_ip(&self) -> Result<(), Error> {
let config = self.config()?;
if config.host.is_empty() || config.username.is_empty() {
pub async fn update_ddns(&self) -> Result<(), Error> {
let url = self.config_manager.get_ddns_update_url().await;
let Some(url) = url else {
debug!("Skipping DDNS update because credentials are missing");
return Ok(());
}
};
let full_url = format!("{}?host={}", DDNS_UPDATE_URL, &config.host);
let credentials = format!("{}:{}", &config.username, &config.password);
let response = ureq::get(full_url.as_str())
.set(
"Authorization",
&format!("Basic {}", BASE64_STANDARD_NO_PAD.encode(credentials)),
)
.call();
let response = ureq::get(&url.to_string()).call();
match response {
Ok(_) => Ok(()),
@ -62,40 +29,17 @@ impl Manager {
}
}
pub fn config(&self) -> Result<Config, Error> {
use crate::db::ddns_config::dsl::*;
let mut connection = self.db.connect()?;
Ok(ddns_config
.select((host, username, password))
.get_result(&mut connection)?)
}
pub fn set_config(&self, new_config: &Config) -> Result<(), Error> {
use crate::db::ddns_config::dsl::*;
let mut connection = self.db.connect()?;
diesel::update(ddns_config)
.set((
host.eq(&new_config.host),
username.eq(&new_config.username),
password.eq(&new_config.password),
))
.execute(&mut connection)?;
Ok(())
}
pub fn begin_periodic_updates(&self) {
let cloned = self.clone();
std::thread::spawn(move || {
cloned.run();
tokio::spawn({
let ddns = self.clone();
async move {
loop {
if let Err(e) = ddns.update_ddns().await {
error!("Dynamic DNS update error: {:?}", e);
}
tokio::time::sleep(Duration::from_secs(60 * 30)).await;
}
}
});
}
fn run(&self) {
loop {
if let Err(e) = self.update_my_ip() {
error!("Dynamic DNS update error: {:?}", e);
}
thread::sleep(time::Duration::from_secs(60 * 30));
}
}
}

444
src/app/formats.rs Normal file
View file

@ -0,0 +1,444 @@
use id3::TagLike;
use lewton::inside_ogg::OggStreamReader;
use log::error;
use std::fs;
use std::io::{Seek, SeekFrom};
use std::path::Path;
use crate::app::Error;
use crate::utils;
use crate::utils::AudioFormat;
/// Tag metadata extracted from an audio file. Fields are optional or may be
/// empty depending on what the file's tags provide.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SongMetadata {
	pub disc_number: Option<u32>,
	pub track_number: Option<u32>,
	pub title: Option<String>,
	// Duration in seconds, when known.
	pub duration: Option<u32>,
	pub artists: Vec<String>,
	pub album_artists: Vec<String>,
	pub album: Option<String>,
	pub year: Option<i32>,
	// True when the file embeds at least one picture.
	pub has_artwork: bool,
	pub lyricists: Vec<String>,
	pub composers: Vec<String>,
	pub genres: Vec<String>,
	pub labels: Vec<String>,
}
/// Reads tag metadata from an audio file, dispatching on its detected format.
/// Returns `None` for unrecognized formats; parse failures are logged and also
/// yield `None`.
pub fn read_metadata<P: AsRef<Path>>(path: P) -> Option<SongMetadata> {
	let parsed = match utils::get_audio_format(&path) {
		Some(AudioFormat::AIFF) | Some(AudioFormat::WAVE) => read_id3(&path),
		Some(AudioFormat::FLAC) => read_flac(&path),
		Some(AudioFormat::MP3) => read_mp3(&path),
		Some(AudioFormat::OGG) => read_vorbis(&path),
		Some(AudioFormat::OPUS) => read_opus(&path),
		Some(AudioFormat::APE) | Some(AudioFormat::MPC) => read_ape(&path),
		Some(AudioFormat::MP4) | Some(AudioFormat::M4B) => read_mp4(&path),
		None => return None,
	};
	parsed
		.map_err(|e| {
			error!(
				"Error while reading file metadata for '{:?}': {}",
				path.as_ref(),
				e
			)
		})
		.ok()
}
/// Convenience helper for reading multi-valued ID3 text frames.
trait ID3Ext {
	fn get_text_values(&self, frame_name: &str) -> Vec<String>;
}

impl ID3Ext for id3::Tag {
	/// Returns every text value in the named frame, or an empty list when the
	/// frame is absent or not textual.
	fn get_text_values(&self, frame_name: &str) -> Vec<String> {
		self.get(frame_name)
			.and_then(|f| f.content().text_values())
			.map(|i| i.map(str::to_string).collect())
			.unwrap_or_default()
	}
}
/// Opens the file at `path` and reads its ID3 metadata.
fn read_id3<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
	let file = fs::File::open(path.as_ref()).map_err(|e| Error::Io(path.as_ref().to_owned(), e))?;
	read_id3_from_file(&file, path)
}
/// Parses an ID3 tag from an already-open file. `path` is only used for error
/// reporting. Partially-readable tags are accepted.
fn read_id3_from_file<P: AsRef<Path>>(file: &fs::File, path: P) -> Result<SongMetadata, Error> {
	let tag = id3::Tag::read_from2(file)
		.or_else(|error| {
			// Salvage whatever could be parsed from a malformed tag.
			if let Some(tag) = error.partial_tag {
				Ok(tag)
			} else {
				Err(error)
			}
		})
		.map_err(|e| Error::Id3(path.as_ref().to_owned(), e))?;
	let artists = tag.get_text_values("TPE1");
	let album_artists = tag.get_text_values("TPE2");
	let album = tag.album().map(|s| s.to_string());
	let title = tag.title().map(|s| s.to_string());
	let duration = tag.duration();
	let disc_number = tag.disc();
	let track_number = tag.track();
	// Prefer the plain year frame, falling back to the various dated frames.
	let year = tag
		.year()
		.or_else(|| tag.date_released().map(|d| d.year))
		.or_else(|| tag.original_date_released().map(|d| d.year))
		.or_else(|| tag.date_recorded().map(|d| d.year));
	let has_artwork = tag.pictures().count() > 0;
	let lyricists = tag.get_text_values("TEXT");
	let composers = tag.get_text_values("TCOM");
	let genres = tag.get_text_values("TCON");
	let labels = tag.get_text_values("TPUB");
	Ok(SongMetadata {
		disc_number,
		track_number,
		title,
		duration,
		artists,
		album_artists,
		album,
		year,
		has_artwork,
		lyricists,
		composers,
		genres,
		labels,
	})
}
fn read_mp3<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
let mut file = fs::File::open(&path).unwrap();
let mut metadata = read_id3_from_file(&file, &path)?;
metadata.duration = metadata.duration.or_else(|| {
file.seek(SeekFrom::Start(0)).unwrap();
mp3_duration::from_file(&file)
.map(|d| d.as_secs() as u32)
.ok()
});
Ok(metadata)
}
/// Helpers for decoding APE tag items into plain Rust values.
mod ape_ext {
	use regex::Regex;
	use std::sync::LazyLock;

	/// Reads a single string value from an APE tag item.
	pub fn read_string(item: &ape::Item) -> Option<String> {
		item.try_into().ok().map(str::to_string)
	}

	/// Reads a multi-valued string list from an optional APE tag item.
	pub fn read_strings(item: Option<&ape::Item>) -> Vec<String> {
		let Some(item) = item else {
			return vec![];
		};
		let strings: Vec<&str> = item.try_into().unwrap_or_default();
		strings.into_iter().map(str::to_string).collect()
	}

	/// Parses an APE tag item as a signed integer.
	pub fn read_i32(item: &ape::Item) -> Option<i32> {
		// `and_then` replaces the former `map(...).flatten()` chain.
		item.try_into()
			.ok()
			.and_then(|s: &str| s.parse::<i32>().ok())
	}

	/// Matches the leading number in values like `1/12`.
	static X_OF_Y_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r#"^\d+"#).unwrap());

	/// Parses the `X` in `X` or `X/Y` style disc/track numbers.
	pub fn read_x_of_y(item: &ape::Item) -> Option<u32> {
		let text: &str = item.try_into().ok()?;
		let matched = X_OF_Y_REGEX.find(text)?;
		matched.as_str().parse().ok()
	}
}
/// Reads song metadata from an APE tag (APE and Musepack files).
///
/// APE tags carry no duration or artwork information, so those fields are
/// always `None`/`false`.
fn read_ape<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
	let tag = ape::read_from_path(path)?;
	Ok(SongMetadata {
		artists: ape_ext::read_strings(tag.item("Artist")),
		album_artists: ape_ext::read_strings(tag.item("Album artist")),
		album: tag.item("Album").and_then(ape_ext::read_string),
		title: tag.item("Title").and_then(ape_ext::read_string),
		duration: None,
		disc_number: tag.item("Disc").and_then(ape_ext::read_x_of_y),
		track_number: tag.item("Track").and_then(ape_ext::read_x_of_y),
		year: tag.item("Year").and_then(ape_ext::read_i32),
		has_artwork: false,
		lyricists: ape_ext::read_strings(tag.item("LYRICIST")),
		composers: ape_ext::read_strings(tag.item("COMPOSER")),
		genres: ape_ext::read_strings(tag.item("GENRE")),
		labels: ape_ext::read_strings(tag.item("PUBLISHER")),
	})
}
/// Reads song metadata from the Vorbis comments of an Ogg Vorbis file.
fn read_vorbis<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
	let file = fs::File::open(&path).map_err(|e| Error::Io(path.as_ref().to_owned(), e))?;
	let source = OggStreamReader::new(file)?;
	let mut metadata = SongMetadata::default();
	// Comment keys are matched via the `utils::match_ignore_case!` macro, so
	// e.g. "title" and "TITLE" are treated alike. Multi-valued fields
	// accumulate one entry per comment.
	for (key, value) in source.comment_hdr.comment_list {
		utils::match_ignore_case! {
			match key {
				"TITLE" => metadata.title = Some(value),
				"ALBUM" => metadata.album = Some(value),
				"ARTIST" => metadata.artists.push(value),
				"ALBUMARTIST" => metadata.album_artists.push(value),
				"TRACKNUMBER" => metadata.track_number = value.parse::<u32>().ok(),
				"DISCNUMBER" => metadata.disc_number = value.parse::<u32>().ok(),
				"DATE" => metadata.year = value.parse::<i32>().ok(),
				"LYRICIST" => metadata.lyricists.push(value),
				"COMPOSER" => metadata.composers.push(value),
				"GENRE" => metadata.genres.push(value),
				"PUBLISHER" => metadata.labels.push(value),
				_ => (),
			}
		}
	}
	Ok(metadata)
}
/// Reads song metadata from the user comments of an Opus file.
fn read_opus<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
	let headers = opus_headers::parse_from_path(path)?;
	let mut metadata = SongMetadata::default();
	// Same key conventions and case-insensitive matching as `read_vorbis`.
	for (key, value) in headers.comments.user_comments {
		utils::match_ignore_case! {
			match key {
				"TITLE" => metadata.title = Some(value),
				"ALBUM" => metadata.album = Some(value),
				"ARTIST" => metadata.artists.push(value),
				"ALBUMARTIST" => metadata.album_artists.push(value),
				"TRACKNUMBER" => metadata.track_number = value.parse::<u32>().ok(),
				"DISCNUMBER" => metadata.disc_number = value.parse::<u32>().ok(),
				"DATE" => metadata.year = value.parse::<i32>().ok(),
				"LYRICIST" => metadata.lyricists.push(value),
				"COMPOSER" => metadata.composers.push(value),
				"GENRE" => metadata.genres.push(value),
				"PUBLISHER" => metadata.labels.push(value),
				_ => (),
			}
		}
	}
	Ok(metadata)
}
/// Reads song metadata from a FLAC file's Vorbis comment block.
///
/// Fails with `VorbisCommentNotFoundInFlacFile` when the file has no comment
/// block. Duration is derived from the StreamInfo block when present.
fn read_flac<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
	let tag = metaflac::Tag::read_from_path(&path)
		.map_err(|e| Error::Metaflac(path.as_ref().to_owned(), e))?;
	let vorbis = tag
		.vorbis_comments()
		.ok_or(Error::VorbisCommentNotFoundInFlacFile)?;
	// Use `.first()` instead of `[0]`: a comment field present with zero
	// values previously caused an out-of-bounds panic.
	let disc_number = vorbis
		.get("DISCNUMBER")
		.and_then(|d| d.first())
		.and_then(|d| d.parse::<u32>().ok());
	let year = vorbis
		.get("DATE")
		.and_then(|d| d.first())
		.and_then(|d| d.parse::<i32>().ok());
	let mut streaminfo = tag.get_blocks(metaflac::BlockType::StreamInfo);
	// Guard against a zero sample rate to avoid a division-by-zero panic on
	// malformed files.
	let duration = match streaminfo.next() {
		Some(metaflac::Block::StreamInfo(s)) if s.sample_rate > 0 => {
			Some(s.total_samples as u32 / s.sample_rate)
		}
		_ => None,
	};
	let has_artwork = tag.pictures().count() > 0;
	let multivalue = |o: Option<&Vec<String>>| o.cloned().unwrap_or_default();
	Ok(SongMetadata {
		artists: multivalue(vorbis.artist()),
		album_artists: multivalue(vorbis.album_artist()),
		album: vorbis.album().and_then(|v| v.first().cloned()),
		title: vorbis.title().and_then(|v| v.first().cloned()),
		duration,
		disc_number,
		track_number: vorbis.track(),
		year,
		has_artwork,
		lyricists: multivalue(vorbis.get("LYRICIST")),
		composers: multivalue(vorbis.get("COMPOSER")),
		genres: multivalue(vorbis.get("GENRE")),
		labels: multivalue(vorbis.get("PUBLISHER")),
	})
}
/// Reads song metadata from MP4-family containers (m4a, m4b).
fn read_mp4<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
	let mut tag = mp4ameta::Tag::read_from_path(&path)
		.map_err(|e| Error::Mp4aMeta(path.as_ref().to_owned(), e))?;
	// Record labels live in a freeform iTunes atom rather than a standard one.
	let label_ident = mp4ameta::FreeformIdent::new("com.apple.iTunes", "Label");
	// `take_*` consumes values out of the tag, avoiding clones.
	Ok(SongMetadata {
		artists: tag.take_artists().collect(),
		album_artists: tag.take_album_artists().collect(),
		album: tag.take_album(),
		title: tag.take_title(),
		duration: tag.duration().map(|v| v.as_secs() as u32),
		disc_number: tag.disc_number().map(|d| d as u32),
		track_number: tag.track_number().map(|d| d as u32),
		year: tag.year().and_then(|v| v.parse::<i32>().ok()),
		has_artwork: tag.artwork().is_some(),
		lyricists: tag.take_lyricists().collect(),
		composers: tag.take_composers().collect(),
		genres: tag.take_genres().collect(),
		labels: tag.take_strings_of(&label_ident).collect(),
	})
}
#[test]
fn reads_file_metadata() {
	// Shared expectation for the per-format samples; formats that expose a
	// duration get the `Some(0)` variant.
	let expected_without_duration = SongMetadata {
		disc_number: Some(3),
		track_number: Some(1),
		title: Some("TEST TITLE".into()),
		artists: vec!["TEST ARTIST".into()],
		album_artists: vec!["TEST ALBUM ARTIST".into()],
		album: Some("TEST ALBUM".into()),
		duration: None,
		year: Some(2016),
		has_artwork: false,
		lyricists: vec!["TEST LYRICIST".into()],
		composers: vec!["TEST COMPOSER".into()],
		genres: vec!["TEST GENRE".into()],
		labels: vec!["TEST LABEL".into()],
	};
	let expected_with_duration = SongMetadata {
		duration: Some(0),
		..expected_without_duration.clone()
	};
	for extension in ["aif", "ogg", "opus", "ape", "wav"] {
		let path = format!("test-data/formats/sample.{extension}");
		assert_eq!(
			read_metadata(Path::new(&path)).unwrap(),
			expected_without_duration,
			"{path}"
		);
	}
	for extension in ["mp3", "flac", "m4a"] {
		let path = format!("test-data/formats/sample.{extension}");
		assert_eq!(
			read_metadata(Path::new(&path)).unwrap(),
			expected_with_duration,
			"{path}"
		);
	}
}
#[test]
fn reads_embedded_artwork() {
	// Every format that supports embedded pictures should report artwork.
	for extension in ["aif", "mp3", "flac", "m4a", "wav"] {
		let path = format!("test-data/artwork/sample.{extension}");
		assert!(
			read_metadata(Path::new(&path)).unwrap().has_artwork,
			"{path}"
		);
	}
}
#[test]
fn reads_multivalue_fields() {
	// Each multi-valued field should yield both the "TEST" and "OTHER" entry.
	let expected_without_duration = SongMetadata {
		disc_number: Some(3),
		track_number: Some(1),
		title: Some("TEST TITLE".into()),
		artists: vec!["TEST ARTIST".into(), "OTHER ARTIST".into()],
		album_artists: vec!["TEST ALBUM ARTIST".into(), "OTHER ALBUM ARTIST".into()],
		album: Some("TEST ALBUM".into()),
		duration: None,
		year: Some(2016),
		has_artwork: false,
		lyricists: vec!["TEST LYRICIST".into(), "OTHER LYRICIST".into()],
		composers: vec!["TEST COMPOSER".into(), "OTHER COMPOSER".into()],
		genres: vec!["TEST GENRE".into(), "OTHER GENRE".into()],
		labels: vec!["TEST LABEL".into(), "OTHER LABEL".into()],
	};
	let expected_with_duration = SongMetadata {
		duration: Some(0),
		..expected_without_duration.clone()
	};
	// TODO Test m4a support (likely working). Pending https://tickets.metabrainz.org/browse/PICARD-3029
	for extension in ["aif", "ogg", "opus", "ape", "wav"] {
		let path = format!("test-data/multivalue/multivalue.{extension}");
		assert_eq!(
			read_metadata(Path::new(&path)).unwrap(),
			expected_without_duration,
			"{path}"
		);
	}
	for extension in ["mp3", "flac"] {
		let path = format!("test-data/multivalue/multivalue.{extension}");
		assert_eq!(
			read_metadata(Path::new(&path)).unwrap(),
			expected_with_duration,
			"{path}"
		);
	}
}

View file

@ -1,91 +1,388 @@
use log::error;
use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;
use std::{
path::{Path, PathBuf},
sync::{Arc, RwLock},
};
use crate::app::{settings, vfs};
use crate::db::DB;
use log::{error, info};
use serde::{Deserialize, Serialize};
use tokio::task::spawn_blocking;
mod metadata;
use crate::app::{scanner, Error};
mod browser;
mod collection;
mod dictionary;
mod query;
#[cfg(test)]
mod test;
mod types;
mod update;
mod search;
mod storage;
pub use self::query::*;
pub use self::types::*;
pub use browser::File;
pub use collection::{Album, AlbumHeader, Artist, ArtistHeader, Genre, GenreHeader, Song};
use storage::{store_song, AlbumKey, ArtistKey, GenreKey, InternPath, SongKey};
#[derive(Clone)]
pub struct Index {
db: DB,
vfs_manager: vfs::Manager,
settings_manager: settings::Manager,
pending_reindex: Arc<(Mutex<bool>, Condvar)>,
pub struct Manager {
index_file_path: PathBuf,
index: Arc<RwLock<Index>>, // Not a tokio RwLock as we want to do CPU-bound work with Index and lock this inside spawn_blocking()
}
impl Index {
pub fn new(db: DB, vfs_manager: vfs::Manager, settings_manager: settings::Manager) -> Self {
let index = Self {
db,
vfs_manager,
settings_manager,
impl Manager {
pub async fn new(directory: &Path) -> Result<Self, Error> {
tokio::fs::create_dir_all(directory)
.await
.map_err(|e| Error::Io(directory.to_owned(), e))?;
pending_reindex: Arc::new((
#[allow(clippy::mutex_atomic)]
Mutex::new(false),
Condvar::new(),
)),
let index_manager = Self {
index_file_path: directory.join("collection.index"),
index: Arc::default(),
};
let commands_index = index.clone();
std::thread::spawn(move || {
commands_index.process_commands();
});
match index_manager.try_restore_index().await {
Ok(true) => info!("Restored collection index from disk"),
Ok(false) => info!("No existing collection index to restore"),
Err(e) => error!("Failed to restore collection index: {}", e),
};
index
Ok(index_manager)
}
pub fn trigger_reindex(&self) {
let (lock, cvar) = &*self.pending_reindex;
let mut pending_reindex = lock.lock().unwrap();
*pending_reindex = true;
cvar.notify_one();
}
pub fn begin_periodic_updates(&self) {
let auto_index = self.clone();
std::thread::spawn(move || {
auto_index.automatic_reindex();
});
}
fn process_commands(&self) {
loop {
{
let (lock, cvar) = &*self.pending_reindex;
let mut pending = lock.lock().unwrap();
while !*pending {
pending = cvar.wait(pending).unwrap();
}
*pending = false;
pub async fn is_index_empty(&self) -> bool {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index.collection.num_songs() == 0
}
if let Err(e) = self.update() {
error!("Error while updating index: {}", e);
}
}
})
.await
.unwrap()
}
fn automatic_reindex(&self) {
loop {
self.trigger_reindex();
let sleep_duration = self
.settings_manager
.get_index_sleep_duration()
.unwrap_or_else(|e| {
error!("Could not retrieve index sleep duration: {}", e);
Duration::from_secs(1800)
});
std::thread::sleep(sleep_duration);
pub async fn replace_index(&self, new_index: Index) {
spawn_blocking({
let index_manager = self.clone();
move || {
let mut lock = index_manager.index.write().unwrap();
*lock = new_index;
}
})
.await
.unwrap()
}
pub async fn persist_index(&self, index: &Index) -> Result<(), Error> {
let serialized = match bitcode::serialize(index) {
Ok(s) => s,
Err(_) => return Err(Error::IndexSerializationError),
};
tokio::fs::write(&self.index_file_path, &serialized[..])
.await
.map_err(|e| Error::Io(self.index_file_path.clone(), e))?;
Ok(())
}
async fn try_restore_index(&self) -> Result<bool, Error> {
match tokio::fs::try_exists(&self.index_file_path).await {
Ok(true) => (),
Ok(false) => return Ok(false),
Err(e) => return Err(Error::Io(self.index_file_path.clone(), e)),
};
let serialized = tokio::fs::read(&self.index_file_path)
.await
.map_err(|e| Error::Io(self.index_file_path.clone(), e))?;
let index = match bitcode::deserialize(&serialized[..]) {
Ok(i) => i,
Err(_) => return Err(Error::IndexDeserializationError),
};
self.replace_index(index).await;
Ok(true)
}
pub async fn browse(&self, virtual_path: PathBuf) -> Result<Vec<browser::File>, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index.browser.browse(&index.dictionary, virtual_path)
}
})
.await
.unwrap()
}
pub async fn flatten(&self, virtual_path: PathBuf) -> Result<Vec<PathBuf>, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index.browser.flatten(&index.dictionary, virtual_path)
}
})
.await
.unwrap()
}
pub async fn get_genres(&self) -> Vec<GenreHeader> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index.collection.get_genres(&index.dictionary)
}
})
.await
.unwrap()
}
pub async fn get_genre(&self, name: String) -> Result<Genre, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
let name = index
.dictionary
.get(&name)
.ok_or_else(|| Error::GenreNotFound)?;
let genre_key = GenreKey(name);
index
.collection
.get_genre(&index.dictionary, genre_key)
.ok_or_else(|| Error::GenreNotFound)
}
})
.await
.unwrap()
}
pub async fn get_albums(&self) -> Vec<AlbumHeader> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index.collection.get_albums(&index.dictionary)
}
})
.await
.unwrap()
}
pub async fn get_artists(&self) -> Vec<ArtistHeader> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index.collection.get_artists(&index.dictionary)
}
})
.await
.unwrap()
}
pub async fn get_artist(&self, name: String) -> Result<Artist, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
let name = index
.dictionary
.get(name)
.ok_or_else(|| Error::ArtistNotFound)?;
let artist_key = ArtistKey(name);
index
.collection
.get_artist(&index.dictionary, artist_key)
.ok_or_else(|| Error::ArtistNotFound)
}
})
.await
.unwrap()
}
pub async fn get_album(&self, artists: Vec<String>, name: String) -> Result<Album, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
let name = index
.dictionary
.get(&name)
.ok_or_else(|| Error::AlbumNotFound)?;
let album_key = AlbumKey {
artists: artists
.into_iter()
.filter_map(|a| index.dictionary.get(a))
.map(|k| ArtistKey(k))
.collect(),
name,
};
index
.collection
.get_album(&index.dictionary, album_key)
.ok_or_else(|| Error::AlbumNotFound)
}
})
.await
.unwrap()
}
pub async fn get_random_albums(
&self,
seed: Option<u64>,
offset: usize,
count: usize,
) -> Result<Vec<Album>, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
Ok(index
.collection
.get_random_albums(&index.dictionary, seed, offset, count))
}
})
.await
.unwrap()
}
pub async fn get_recent_albums(
&self,
offset: usize,
count: usize,
) -> Result<Vec<Album>, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
Ok(index
.collection
.get_recent_albums(&index.dictionary, offset, count))
}
})
.await
.unwrap()
}
pub async fn get_songs(&self, virtual_paths: Vec<PathBuf>) -> Vec<Result<Song, Error>> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
virtual_paths
.into_iter()
.map(|p| {
p.get(&index.dictionary)
.and_then(|virtual_path| {
let key = SongKey { virtual_path };
index.collection.get_song(&index.dictionary, key)
})
.ok_or_else(|| Error::SongNotFound)
})
.collect()
}
})
.await
.unwrap()
}
pub async fn search(&self, query: String) -> Result<Vec<Song>, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index
.search
.find_songs(&index.collection, &index.dictionary, &query)
}
})
.await
.unwrap()
}
}
/// Serializable snapshot of the music collection, rebuilt by each scan and
/// replaced wholesale via `Manager::replace_index`.
#[derive(Serialize, Deserialize)]
pub struct Index {
	pub dictionary: dictionary::Dictionary,
	pub browser: browser::Browser,
	pub collection: collection::Collection,
	pub search: search::Search,
}
impl Default for Index {
	// An empty index with no songs or interned strings.
	// NOTE(review): every field uses `Default::default()`; this impl looks
	// replaceable by `#[derive(Default)]` on the struct — confirm all field
	// types implement `Default` before changing.
	fn default() -> Self {
		Self {
			dictionary: Default::default(),
			browser: Default::default(),
			collection: Default::default(),
			search: Default::default(),
		}
	}
}
/// Accumulates directories and songs emitted by a scan and assembles the
/// final immutable `Index`.
#[derive(Clone)]
pub struct Builder {
	dictionary_builder: dictionary::Builder,
	browser_builder: browser::Builder,
	collection_builder: collection::Builder,
	search_builder: search::Builder,
}
impl Builder {
	/// Creates an empty builder.
	pub fn new() -> Self {
		Self {
			dictionary_builder: Default::default(),
			browser_builder: Default::default(),
			collection_builder: Default::default(),
			search_builder: Default::default(),
		}
	}

	/// Registers a scanned directory with the browser.
	pub fn add_directory(&mut self, directory: scanner::Directory) {
		self.browser_builder
			.add_directory(&mut self.dictionary_builder, directory);
	}

	/// Registers a scanned song with every sub-index; songs that cannot be
	/// interned are skipped.
	pub fn add_song(&mut self, scanner_song: scanner::Song) {
		let Some(storage_song) = store_song(&mut self.dictionary_builder, &scanner_song) else {
			return;
		};
		self.browser_builder
			.add_song(&mut self.dictionary_builder, &scanner_song);
		self.collection_builder.add_song(&storage_song);
		self.search_builder.add_song(&scanner_song, &storage_song);
	}

	/// Freezes all accumulated data into an immutable `Index`.
	pub fn build(self) -> Index {
		Index {
			dictionary: self.dictionary_builder.build(),
			browser: self.browser_builder.build(),
			collection: self.collection_builder.build(),
			search: self.search_builder.build(),
		}
	}
}
impl Default for Builder {
	// Delegates to `new()` so both construction paths stay in sync.
	fn default() -> Self {
		Self::new()
	}
}
#[cfg(test)]
mod test {
	use crate::{
		app::{index, test},
		test_name,
	};

	#[tokio::test]
	async fn can_persist_index() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		// `assert!` / `assert!(!..)` instead of comparing against boolean
		// literals (clippy::bool_assert_comparison).
		assert!(!ctx.index_manager.try_restore_index().await.unwrap());
		let index = index::Builder::new().build();
		ctx.index_manager.persist_index(&index).await.unwrap();
		assert!(ctx.index_manager.try_restore_index().await.unwrap());
	}
}

389
src/app/index/browser.rs Normal file
View file

@ -0,0 +1,389 @@
use std::{
cmp::Ordering,
collections::{BTreeSet, HashMap},
ffi::OsStr,
hash::Hash,
path::{Path, PathBuf},
};
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use tinyvec::TinyVec;
use trie_rs::{Trie, TrieBuilder};
use crate::app::index::{
dictionary::{self, Dictionary},
storage::{self, PathKey},
InternPath,
};
use crate::app::{scanner, Error};
/// A browsable entry: a sub-directory or a song, identified by virtual path.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum File {
	Directory(PathBuf),
	Song(PathBuf),
}
/// Read-only view of the collection's directory hierarchy.
#[derive(Serialize, Deserialize)]
pub struct Browser {
	// Direct children of each directory, keyed by interned directory path.
	directories: HashMap<PathKey, BTreeSet<storage::File>>,
	// Trie over the interned path components of every song, used by `flatten`.
	flattened: Trie<lasso2::Spur>,
}
impl Default for Browser {
	// Manual impl because the trie is produced via `TrieBuilder::build()`.
	fn default() -> Self {
		Self {
			directories: HashMap::default(),
			flattened: TrieBuilder::new().build(),
		}
	}
}
impl Browser {
	/// Lists the files and directories directly under `virtual_path`.
	///
	/// Directories sort before songs; entries within each group are ordered
	/// with a locale-aware collator. If the root holds exactly one directory,
	/// browsing the root returns that directory's content instead.
	pub fn browse<P: AsRef<Path>>(
		&self,
		dictionary: &Dictionary,
		virtual_path: P,
	) -> Result<Vec<File>, Error> {
		let path = virtual_path
			.as_ref()
			.get(dictionary)
			.ok_or_else(|| Error::DirectoryNotFound(virtual_path.as_ref().to_owned()))?;
		let Some(files) = self.directories.get(&path) else {
			return Err(Error::DirectoryNotFound(virtual_path.as_ref().to_owned()));
		};
		// Resolve interned keys back into real paths.
		let mut files = files
			.iter()
			.map(|f| {
				let path = match f {
					storage::File::Directory(p) => p,
					storage::File::Song(p) => p,
				};
				let path = Path::new(OsStr::new(dictionary.resolve(&path.0))).to_owned();
				match f {
					storage::File::Directory(_) => File::Directory(path),
					storage::File::Song(_) => File::Song(path),
				}
			})
			.collect::<Vec<_>>();
		// Skip over a redundant top-level directory that contains everything.
		if virtual_path.as_ref().parent().is_none() {
			if let [File::Directory(ref p)] = files[..] {
				return self.browse(dictionary, p);
			}
		}
		let collator = dictionary::make_collator();
		files.sort_by(|a, b| {
			let (a, b) = match (a, b) {
				(File::Directory(_), File::Song(_)) => return Ordering::Less,
				(File::Song(_), File::Directory(_)) => return Ordering::Greater,
				(File::Directory(a), File::Directory(b)) => (a, b),
				(File::Song(a), File::Song(b)) => (a, b),
			};
			collator.compare(
				a.as_os_str().to_string_lossy().as_ref(),
				b.as_os_str().to_string_lossy().as_ref(),
			)
		});
		Ok(files)
	}

	/// Lists every song under `virtual_path`, recursively, in sorted order.
	pub fn flatten<P: AsRef<Path>>(
		&self,
		dictionary: &Dictionary,
		virtual_path: P,
	) -> Result<Vec<PathBuf>, Error> {
		let path_components = virtual_path
			.as_ref()
			.components()
			.map(|c| c.as_os_str().to_str().unwrap_or_default())
			.filter_map(|c| dictionary.get(c))
			.collect::<Vec<_>>();
		if !self.flattened.is_prefix(&path_components) {
			return Err(Error::DirectoryNotFound(virtual_path.as_ref().to_owned()));
		}
		let mut results: Vec<TinyVec<[_; 8]>> = self
			.flattened
			.predictive_search(path_components)
			.collect::<Vec<_>>();
		// Lexicographic sort using the dictionary's precomputed sort keys.
		results.par_sort_unstable_by(|a, b| {
			for (x, y) in a.iter().zip(b.iter()) {
				match dictionary.cmp(x, y) {
					Ordering::Equal => continue,
					// Plain binding instead of the redundant `ordering @ _`
					// pattern (clippy::redundant_pattern).
					ordering => return ordering,
				}
			}
			a.len().cmp(&b.len())
		});
		let files = results
			.into_iter()
			.map(|c: TinyVec<[_; 8]>| -> PathBuf {
				c.into_iter()
					.map(|s| dictionary.resolve(&s))
					.collect::<TinyVec<[&str; 8]>>()
					.join(std::path::MAIN_SEPARATOR_STR)
					.into()
			})
			.collect::<Vec<_>>();
		Ok(files)
	}
}
/// Accumulates directories and songs during a scan and freezes them into an
/// immutable `Browser`.
#[derive(Clone, Default)]
pub struct Builder {
	directories: HashMap<PathKey, BTreeSet<storage::File>>,
	flattened: TrieBuilder<lasso2::Spur>,
}
impl Builder {
	/// Registers a directory and links it into its parent's listing.
	pub fn add_directory(
		&mut self,
		dictionary_builder: &mut dictionary::Builder,
		directory: scanner::Directory,
	) {
		let Some(virtual_path) = (&directory.virtual_path).get_or_intern(dictionary_builder) else {
			return;
		};
		let Some(virtual_parent) = directory
			.virtual_path
			.parent()
			.and_then(|p| p.get_or_intern(dictionary_builder))
		else {
			return;
		};
		self.directories.entry(virtual_path).or_default();
		self.directories
			.entry(virtual_parent)
			.or_default()
			.insert(storage::File::Directory(virtual_path));
	}

	/// Registers a song in its parent directory listing and in the flattened
	/// trie used for recursive listings.
	pub fn add_song(&mut self, dictionary_builder: &mut dictionary::Builder, song: &scanner::Song) {
		let Some(virtual_path) = (&song.virtual_path).get_or_intern(dictionary_builder) else {
			return;
		};
		let Some(virtual_parent) = song
			.virtual_path
			.parent()
			.and_then(|p| p.get_or_intern(dictionary_builder))
		else {
			return;
		};
		self.directories
			.entry(virtual_parent)
			.or_default()
			.insert(storage::File::Song(virtual_path));
		// Non-UTF-8 components intern as the empty string, matching the
		// lookup in `Browser::flatten`. The previous `unwrap()` would panic
		// on such paths.
		self.flattened.push(
			song.virtual_path
				.components()
				.map(|c| {
					dictionary_builder.get_or_intern(c.as_os_str().to_str().unwrap_or_default())
				})
				.collect::<TinyVec<[lasso2::Spur; 8]>>(),
		);
	}

	/// Freezes the accumulated data into an immutable `Browser`.
	pub fn build(self) -> Browser {
		Browser {
			directories: self.directories,
			flattened: self.flattened.build(),
		}
	}
}
#[cfg(test)]
mod test {
	use std::collections::HashSet;
	use std::path::PathBuf;
	use super::*;
	// Builds a `Browser` and its `Dictionary` from a set of song paths,
	// registering every ancestor directory of every song.
	fn setup_test(songs: HashSet<PathBuf>) -> (Browser, Dictionary) {
		let mut dictionary_builder = dictionary::Builder::default();
		let mut builder = Builder::default();
		let directories = songs
			.iter()
			.flat_map(|k| k.parent().unwrap().ancestors())
			.collect::<HashSet<_>>();
		for directory in directories {
			builder.add_directory(
				&mut dictionary_builder,
				scanner::Directory {
					virtual_path: directory.to_owned(),
				},
			);
		}
		for path in songs {
			let mut song = scanner::Song::default();
			song.virtual_path = path.clone();
			builder.add_song(&mut dictionary_builder, &song);
		}
		let browser = builder.build();
		let dictionary = dictionary_builder.build();
		(browser, dictionary)
	}
	#[test]
	fn can_browse_top_level() {
		let (browser, strings) = setup_test(HashSet::from([
			PathBuf::from_iter(["Music", "Iron Maiden", "Moonchild.mp3"]),
			PathBuf::from_iter(["Also Music", "Iron Maiden", "The Prisoner.mp3"]),
		]));
		let files = browser.browse(&strings, PathBuf::new()).unwrap();
		assert_eq!(
			files[..],
			[
				File::Directory(PathBuf::from_iter(["Also Music"])),
				File::Directory(PathBuf::from_iter(["Music"])),
			]
		);
	}
	// A lone top-level directory is skipped and its content returned instead.
	#[test]
	fn browse_skips_redundant_top_level() {
		let (browser, strings) = setup_test(HashSet::from([PathBuf::from_iter([
			"Music",
			"Iron Maiden",
			"Moonchild.mp3",
		])]));
		let files = browser.browse(&strings, PathBuf::new()).unwrap();
		assert_eq!(
			files[..],
			[File::Directory(PathBuf::from_iter([
				"Music",
				"Iron Maiden"
			])),]
		);
	}
	#[test]
	fn can_browse_directory() {
		let artist_directory = PathBuf::from_iter(["Music", "Iron Maiden"]);
		let (browser, strings) = setup_test(HashSet::from([
			artist_directory.join("Infinite Dreams.mp3"),
			artist_directory.join("Moonchild.mp3"),
		]));
		let files = browser.browse(&strings, artist_directory.clone()).unwrap();
		assert_eq!(
			files,
			[
				File::Song(artist_directory.join("Infinite Dreams.mp3")),
				File::Song(artist_directory.join("Moonchild.mp3"))
			]
		);
	}
	// Directories come first, then songs, case- and accent-insensitively.
	#[test]
	fn browse_entries_are_sorted() {
		let (browser, strings) = setup_test(HashSet::from([
			PathBuf::from_iter(["Ott", "Mir.mp3"]),
			PathBuf::from("Helios.mp3"),
			PathBuf::from("asura.mp3"),
			PathBuf::from("à la maison.mp3"),
		]));
		let files = browser.browse(&strings, PathBuf::new()).unwrap();
		assert_eq!(
			files,
			[
				File::Directory(PathBuf::from("Ott")),
				File::Song(PathBuf::from("à la maison.mp3")),
				File::Song(PathBuf::from("asura.mp3")),
				File::Song(PathBuf::from("Helios.mp3")),
			]
		);
	}
	#[test]
	fn can_flatten_root() {
		let song_a = PathBuf::from_iter(["Music", "Electronic", "Papua New Guinea.mp3"]);
		let song_b = PathBuf::from_iter(["Music", "Metal", "Destiny.mp3"]);
		let song_c = PathBuf::from_iter(["Music", "Metal", "No Turning Back.mp3"]);
		let (browser, strings) = setup_test(HashSet::from([
			song_a.clone(),
			song_b.clone(),
			song_c.clone(),
		]));
		let files = browser.flatten(&strings, PathBuf::new()).unwrap();
		assert_eq!(files, [song_a, song_b, song_c]);
	}
	#[test]
	fn can_flatten_directory() {
		let electronic = PathBuf::from_iter(["Music", "Electronic"]);
		let song_a = electronic.join(PathBuf::from_iter(["FSOL", "Papua New Guinea.mp3"]));
		let song_b = electronic.join(PathBuf::from_iter(["Kraftwerk", "Autobahn.mp3"]));
		let song_c = PathBuf::from_iter(["Music", "Metal", "Destiny.mp3"]);
		let (browser, strings) = setup_test(HashSet::from([
			song_a.clone(),
			song_b.clone(),
			song_c.clone(),
		]));
		let files = browser.flatten(&strings, electronic).unwrap();
		assert_eq!(files, [song_a, song_b]);
	}
	#[test]
	fn flatten_entries_are_sorted() {
		let (browser, strings) = setup_test(HashSet::from([
			PathBuf::from_iter(["Ott", "Mir.mp3"]),
			PathBuf::from("Helios.mp3"),
			PathBuf::from("à la maison.mp3.mp3"),
			PathBuf::from("asura.mp3"),
		]));
		let files = browser.flatten(&strings, PathBuf::new()).unwrap();
		assert_eq!(
			files,
			[
				PathBuf::from("à la maison.mp3.mp3"),
				PathBuf::from("asura.mp3"),
				PathBuf::from("Helios.mp3"),
				PathBuf::from_iter(["Ott", "Mir.mp3"]),
			]
		);
	}
	// Flattening "Leviathan II" must not also return songs from the
	// prefix-sharing "Leviathan III" directory.
	#[test]
	fn can_flatten_directory_with_shared_prefix() {
		let directory_a = PathBuf::from_iter(["Music", "Therion", "Leviathan II"]);
		let directory_b = PathBuf::from_iter(["Music", "Therion", "Leviathan III"]);
		let song_a = directory_a.join("Pazuzu.mp3");
		let song_b = directory_b.join("Ninkigal.mp3");
		let (browser, strings) = setup_test(HashSet::from([song_a.clone(), song_b.clone()]));
		let files = browser.flatten(&strings, directory_a).unwrap();
		assert_eq!(files, [song_a]);
	}
}

1116
src/app/index/collection.rs Normal file

File diff suppressed because it is too large Load diff

110
src/app/index/dictionary.rs Normal file
View file

@ -0,0 +1,110 @@
use std::{cmp::Ordering, collections::HashMap};
use icu_collator::{Collator, CollatorOptions, Strength};
use lasso2::{Rodeo, RodeoReader, Spur};
use rayon::slice::ParallelSliceMut;
use serde::{Deserialize, Serialize};
/// Produces the canonical form of a string for fuzzy matching: strips
/// spaces, underscores, dashes and apostrophes, then lowercases the rest.
pub fn sanitize(s: &str) -> String {
	// TODO merge inconsistent diacritic usage
	s.chars()
		.filter(|c| !matches!(c, ' ' | '_' | '-' | '\''))
		.collect::<String>()
		.to_lowercase()
}
/// Builds the collator used for all user-facing sorting.
///
/// Secondary strength: case differences are ignored, accents are not.
pub fn make_collator() -> Collator {
	let mut options = CollatorOptions::new();
	options.strength = Some(Strength::Secondary);
	Collator::try_new(&Default::default(), options).unwrap()
}
/// Immutable string table shared by every index structure: interned strings,
/// canonical (sanitized) lookups, and precomputed collation order.
#[derive(Serialize, Deserialize)]
pub struct Dictionary {
	strings: RodeoReader, // Interned strings
	canon: HashMap<String, Spur>, // Canonical representation of similar strings
	sort_keys: HashMap<Spur, u32>, // All spurs sorted against each other
}
impl Dictionary {
	/// Looks up the symbol for an exact string, if it was interned.
	pub fn get<S: AsRef<str>>(&self, string: S) -> Option<Spur> {
		self.strings.get(string)
	}

	/// Looks up the symbol for the canonical (sanitized) form of a string.
	pub fn get_canon<S: AsRef<str>>(&self, string: S) -> Option<Spur> {
		self.canon.get(&sanitize(string.as_ref())).copied()
	}

	/// Returns the string behind an interned symbol.
	pub fn resolve(&self, spur: &Spur) -> &str {
		self.strings.resolve(spur)
	}

	/// Compares two symbols using the precomputed collation order; unknown
	/// symbols compare as key zero.
	pub fn cmp(&self, a: &Spur, b: &Spur) -> Ordering {
		let sort_key = |s: &Spur| self.sort_keys.get(s).copied().unwrap_or_default();
		sort_key(a).cmp(&sort_key(b))
	}
}
impl Default for Dictionary {
	// Manual impl because `RodeoReader` is obtained via `into_reader()`.
	fn default() -> Self {
		Self {
			strings: Rodeo::default().into_reader(),
			canon: Default::default(),
			sort_keys: Default::default(),
		}
	}
}
/// Accumulates interned strings during a scan; frozen into a `Dictionary`
/// (with collation keys) by `build()`.
#[derive(Clone, Default)]
pub struct Builder {
	strings: Rodeo,
	canon: HashMap<String, Spur>,
}
impl Builder {
	// Freezes the builder into a `Dictionary`, precomputing a global sort key
	// for every interned string.
	pub fn build(self) -> Dictionary {
		let mut sorted_spurs = self.strings.iter().collect::<Vec<_>>();
		// TODO this is too slow!
		// NOTE(review): a new collator is constructed on every comparison
		// closure invocation. Hoisting a single collator out of the sort (if
		// `Collator` is `Sync`) or sorting by cached collation keys would
		// likely address the TODO — confirm thread-safety before changing.
		sorted_spurs.par_sort_unstable_by(|(_, a), (_, b)| {
			let collator = make_collator();
			collator.compare(a, b)
		});
		// Rank in the sorted order becomes the sort key.
		let sort_keys = sorted_spurs
			.into_iter()
			.enumerate()
			.map(|(i, (spur, _))| (spur, i as u32))
			.collect();
		Dictionary {
			strings: self.strings.into_reader(),
			canon: self.canon,
			sort_keys,
		}
	}
	// Interns a string verbatim and returns its symbol.
	pub fn get_or_intern<S: AsRef<str>>(&mut self, string: S) -> Spur {
		self.strings.get_or_intern(string)
	}
	// Interns the canonical (sanitized) form of a string. Returns `None` when
	// sanitization leaves nothing. The first string seen for a given
	// canonical form supplies the stored spelling.
	pub fn get_or_intern_canon<S: AsRef<str>>(&mut self, string: S) -> Option<Spur> {
		let cleaned = sanitize(string.as_ref());
		match cleaned.is_empty() {
			true => None,
			false => Some(
				self.canon
					.entry(cleaned)
					.or_insert_with(|| self.strings.get_or_intern(string.as_ref()))
					.to_owned(),
			),
		}
	}
}

View file

@ -1,450 +0,0 @@
use id3::TagLike;
use lewton::inside_ogg::OggStreamReader;
use log::error;
use regex::Regex;
use std::fs;
use std::path::{Path, PathBuf};
use crate::utils;
use crate::utils::AudioFormat;
/// Errors that can occur while reading song metadata from an audio file.
#[derive(thiserror::Error, Debug)]
pub enum Error {
	#[error(transparent)]
	Ape(#[from] ape::Error),
	#[error(transparent)]
	Id3(#[from] id3::Error),
	// Filesystem error, tagged with the path that triggered it.
	#[error("Filesystem error for `{0}`: `{1}`")]
	Io(PathBuf, std::io::Error),
	#[error(transparent)]
	Metaflac(#[from] metaflac::Error),
	#[error(transparent)]
	Mp4aMeta(#[from] mp4ameta::Error),
	#[error(transparent)]
	Opus(#[from] opus_headers::ParseError),
	#[error(transparent)]
	Vorbis(#[from] lewton::VorbisError),
	#[error("Could not find a Vorbis comment within flac file")]
	VorbisCommentNotFoundInFlacFile,
}
/// Tag data extracted from a single audio file. Every field is optional
/// because most formats only populate a subset.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SongTags {
	pub disc_number: Option<u32>,
	pub track_number: Option<u32>,
	pub title: Option<String>,
	// Duration in seconds, when the format/tag exposes one.
	pub duration: Option<u32>,
	pub artist: Option<String>,
	pub album_artist: Option<String>,
	pub album: Option<String>,
	pub year: Option<i32>,
	// True when the file embeds at least one picture.
	pub has_artwork: bool,
	pub lyricist: Option<String>,
	pub composer: Option<String>,
	pub genre: Option<String>,
	pub label: Option<String>,
}
impl From<id3::Tag> for SongTags {
	/// Converts an ID3 tag into `SongTags`, probing several date frames for
	/// the year, from most to least specific.
	fn from(tag: id3::Tag) -> Self {
		let year = tag
			.year()
			.or_else(|| tag.date_released().map(|d| d.year))
			.or_else(|| tag.original_date_released().map(|d| d.year))
			.or_else(|| tag.date_recorded().map(|d| d.year));
		SongTags {
			disc_number: tag.disc(),
			track_number: tag.track(),
			title: tag.title().map(str::to_string),
			duration: tag.duration(),
			artist: tag.artist().map(str::to_string),
			album_artist: tag.album_artist().map(str::to_string),
			album: tag.album().map(str::to_string),
			year,
			has_artwork: tag.pictures().count() > 0,
			lyricist: tag.get_text("TEXT"),
			composer: tag.get_text("TCOM"),
			genre: tag.genre().map(str::to_string),
			label: tag.get_text("TPUB"),
		}
	}
}
/// Reads tags from the audio file at `path`.
///
/// Returns `None` when the file is not a recognized audio format or when its
/// metadata cannot be read (the error is logged).
pub fn read(path: &Path) -> Option<SongTags> {
	let result = match utils::get_audio_format(path)? {
		AudioFormat::AIFF => read_aiff(path),
		AudioFormat::FLAC => read_flac(path),
		AudioFormat::MP3 => read_mp3(path),
		AudioFormat::OGG => read_vorbis(path),
		AudioFormat::OPUS => read_opus(path),
		AudioFormat::WAVE => read_wave(path),
		AudioFormat::APE | AudioFormat::MPC => read_ape(path),
		AudioFormat::MP4 | AudioFormat::M4B => read_mp4(path),
	};
	match result {
		Ok(tags) => Some(tags),
		Err(e) => {
			error!("Error while reading file metadata for '{:?}': {}", path, e);
			None
		}
	}
}
/// Helper for reading raw text frames out of an ID3 tag.
trait FrameContent {
	/// Returns the value stored, if any, in the Frame.
	/// Say "TCOM" returns composer field.
	fn get_text(&self, key: &str) -> Option<String>;
}
impl FrameContent for id3::Tag {
	/// Returns the text content of the frame named `key`, if present.
	fn get_text(&self, key: &str) -> Option<String> {
		// Non-text frame contents (pictures, comments, …) are ignored.
		if let id3::Content::Text(value) = self.get(key)?.content() {
			Some(value.to_string())
		} else {
			None
		}
	}
}
/// Reads ID3 tags from an MP3 file.
fn read_mp3(path: &Path) -> Result<SongTags, Error> {
	// A partially-readable tag is better than no tag at all.
	let tag = match id3::Tag::read_from_path(path) {
		Ok(tag) => tag,
		Err(error) => match error.partial_tag {
			Some(tag) => tag,
			None => return Err(error.into()),
		},
	};
	let mut song_tags: SongTags = tag.into();
	// Use duration from mp3_duration instead of from tags.
	song_tags.duration = mp3_duration::from_path(path)
		.map(|d| d.as_secs() as u32)
		.ok();
	Ok(song_tags)
}
/// Reads ID3 tags from an AIFF file, accepting partially-readable tags.
fn read_aiff(path: &Path) -> Result<SongTags, Error> {
	match id3::Tag::read_from_aiff_path(path) {
		Ok(tag) => Ok(tag.into()),
		Err(error) => match error.partial_tag {
			Some(tag) => Ok(tag.into()),
			None => Err(error.into()),
		},
	}
}
/// Reads ID3 tags from a WAVE file, accepting partially-readable tags.
fn read_wave(path: &Path) -> Result<SongTags, Error> {
	match id3::Tag::read_from_wav_path(path) {
		Ok(tag) => Ok(tag.into()),
		Err(error) => match error.partial_tag {
			Some(tag) => Ok(tag.into()),
			None => Err(error.into()),
		},
	}
}
/// Returns the text content of an APE item, if it holds text.
fn read_ape_string(item: &ape::Item) -> Option<String> {
	if let ape::ItemValue::Text(ref s) = item.value {
		Some(s.clone())
	} else {
		None
	}
}
/// Parses the text content of an APE item as a signed integer.
fn read_ape_i32(item: &ape::Item) -> Option<i32> {
	match &item.value {
		ape::ItemValue::Text(s) => s.parse().ok(),
		_ => None,
	}
}
fn read_ape_x_of_y(item: &ape::Item) -> Option<u32> {
match item.value {
ape::ItemValue::Text(ref s) => {
let format = Regex::new(r#"^\d+"#).unwrap();
if let Some(m) = format.find(s) {
s[m.start()..m.end()].parse().ok()
} else {
None
}
}
_ => None,
}
}
/// Reads APE tags from an APE or Musepack file.
fn read_ape(path: &Path) -> Result<SongTags, Error> {
	let tag = ape::read_from_path(path)?;
	// Shorthand for looking up a text item by key.
	let text = |key: &str| tag.item(key).and_then(read_ape_string);
	Ok(SongTags {
		artist: text("Artist"),
		album_artist: text("Album artist"),
		album: text("Album"),
		title: text("Title"),
		duration: None,
		disc_number: tag.item("Disc").and_then(read_ape_x_of_y),
		track_number: tag.item("Track").and_then(read_ape_x_of_y),
		year: tag.item("Year").and_then(read_ape_i32),
		has_artwork: false,
		lyricist: text("LYRICIST"),
		composer: text("COMPOSER"),
		genre: text("GENRE"),
		label: text("PUBLISHER"),
	})
}
/// Reads tags from an Ogg Vorbis file's comment header.
fn read_vorbis(path: &Path) -> Result<SongTags, Error> {
	let file = fs::File::open(path).map_err(|e| Error::Io(path.to_owned(), e))?;
	let source = OggStreamReader::new(file)?;
	// Start from empty tags; fields are filled in from the comment list below.
	let mut tags = SongTags {
		artist: None,
		album_artist: None,
		album: None,
		title: None,
		duration: None,
		disc_number: None,
		track_number: None,
		year: None,
		has_artwork: false,
		lyricist: None,
		composer: None,
		genre: None,
		label: None,
	};
	// Comment keys are matched case-insensitively; a repeated key overwrites
	// any earlier value.
	for (key, value) in source.comment_hdr.comment_list {
		utils::match_ignore_case! {
			match key {
				"TITLE" => tags.title = Some(value),
				"ALBUM" => tags.album = Some(value),
				"ARTIST" => tags.artist = Some(value),
				"ALBUMARTIST" => tags.album_artist = Some(value),
				"TRACKNUMBER" => tags.track_number = value.parse::<u32>().ok(),
				"DISCNUMBER" => tags.disc_number = value.parse::<u32>().ok(),
				"DATE" => tags.year = value.parse::<i32>().ok(),
				"LYRICIST" => tags.lyricist = Some(value),
				"COMPOSER" => tags.composer = Some(value),
				"GENRE" => tags.genre = Some(value),
				"PUBLISHER" => tags.label = Some(value),
				_ => (),
			}
		}
	}
	Ok(tags)
}
/// Reads tags from an Opus file's comment headers.
fn read_opus(path: &Path) -> Result<SongTags, Error> {
	let headers = opus_headers::parse_from_path(path)?;
	// Start from empty tags; fields are filled in from the user comments below.
	let mut tags = SongTags {
		artist: None,
		album_artist: None,
		album: None,
		title: None,
		duration: None,
		disc_number: None,
		track_number: None,
		year: None,
		has_artwork: false,
		lyricist: None,
		composer: None,
		genre: None,
		label: None,
	};
	// Comment keys are matched case-insensitively; a repeated key overwrites
	// any earlier value.
	for (key, value) in headers.comments.user_comments {
		utils::match_ignore_case! {
			match key {
				"TITLE" => tags.title = Some(value),
				"ALBUM" => tags.album = Some(value),
				"ARTIST" => tags.artist = Some(value),
				"ALBUMARTIST" => tags.album_artist = Some(value),
				"TRACKNUMBER" => tags.track_number = value.parse::<u32>().ok(),
				"DISCNUMBER" => tags.disc_number = value.parse::<u32>().ok(),
				"DATE" => tags.year = value.parse::<i32>().ok(),
				"LYRICIST" => tags.lyricist = Some(value),
				"COMPOSER" => tags.composer = Some(value),
				"GENRE" => tags.genre = Some(value),
				"PUBLISHER" => tags.label = Some(value),
				_ => (),
			}
		}
	}
	Ok(tags)
}
/// Reads tags from a FLAC file's Vorbis comment block.
fn read_flac(path: &Path) -> Result<SongTags, Error> {
	let tag = metaflac::Tag::read_from_path(path)?;
	let vorbis = tag
		.vorbis_comments()
		.ok_or(Error::VorbisCommentNotFoundInFlacFile)?;
	// Vorbis comment values are lists; take the first entry when present.
	// `first()` avoids the panic that indexing with `[0]` would cause on an
	// empty (but present) value list.
	let first = |values: &Vec<String>| values.first().cloned();
	let disc_number = vorbis
		.get("DISCNUMBER")
		.and_then(|d| d.first())
		.and_then(|d| d.parse::<u32>().ok());
	let year = vorbis
		.get("DATE")
		.and_then(|d| d.first())
		.and_then(|d| d.parse::<i32>().ok());
	let mut streaminfo = tag.get_blocks(metaflac::BlockType::StreamInfo);
	// Duration in seconds, from the stream info block. Guard against a zero
	// sample rate, which would otherwise divide by zero.
	let duration = match streaminfo.next() {
		Some(metaflac::Block::StreamInfo(s)) if s.sample_rate > 0 => {
			Some(s.total_samples as u32 / s.sample_rate)
		}
		_ => None,
	};
	let has_artwork = tag.pictures().count() > 0;
	Ok(SongTags {
		artist: vorbis.artist().and_then(first),
		album_artist: vorbis.album_artist().and_then(first),
		album: vorbis.album().and_then(first),
		title: vorbis.title().and_then(first),
		duration,
		disc_number,
		track_number: vorbis.track(),
		year,
		has_artwork,
		lyricist: vorbis.get("LYRICIST").and_then(first),
		composer: vorbis.get("COMPOSER").and_then(first),
		genre: vorbis.get("GENRE").and_then(first),
		label: vorbis.get("PUBLISHER").and_then(first),
	})
}
/// Reads tags from an MP4/M4A/M4B file.
fn read_mp4(path: &Path) -> Result<SongTags, Error> {
	let mut tag = mp4ameta::Tag::read_from_path(path)?;
	// Record labels are stored under an iTunes freeform atom.
	let label_ident = mp4ameta::FreeformIdent::new("com.apple.iTunes", "Label");
	// `take_*` moves values out of the tag to avoid cloning.
	Ok(SongTags {
		artist: tag.take_artist(),
		album_artist: tag.take_album_artist(),
		album: tag.take_album(),
		title: tag.take_title(),
		duration: tag.duration().map(|v| v.as_secs() as u32),
		disc_number: tag.disc_number().map(|d| d as u32),
		track_number: tag.track_number().map(|d| d as u32),
		year: tag.year().and_then(|v| v.parse::<i32>().ok()),
		has_artwork: tag.artwork().is_some(),
		lyricist: tag.take_lyricist(),
		composer: tag.take_composer(),
		genre: tag.take_genre(),
		label: tag.take_strings_of(&label_ident).next(),
	})
}
#[test]
fn reads_file_metadata() {
	// Expected tags shared by every sample file.
	let sample_tags = SongTags {
		disc_number: Some(3),
		track_number: Some(1),
		title: Some("TEST TITLE".into()),
		artist: Some("TEST ARTIST".into()),
		album_artist: Some("TEST ALBUM ARTIST".into()),
		album: Some("TEST ALBUM".into()),
		duration: None,
		year: Some(2016),
		has_artwork: false,
		lyricist: Some("TEST LYRICIST".into()),
		composer: Some("TEST COMPOSER".into()),
		genre: Some("TEST GENRE".into()),
		label: Some("TEST LABEL".into()),
	};
	// FLAC, MP3 and M4A report a duration (the sample files round down to 0
	// seconds); previously this variant was duplicated three times.
	let with_duration = SongTags {
		duration: Some(0),
		..sample_tags.clone()
	};
	assert_eq!(
		read(Path::new("test-data/formats/sample.aif")).unwrap(),
		sample_tags
	);
	assert_eq!(
		read(Path::new("test-data/formats/sample.mp3")).unwrap(),
		with_duration
	);
	assert_eq!(
		read(Path::new("test-data/formats/sample.ogg")).unwrap(),
		sample_tags
	);
	assert_eq!(
		read(Path::new("test-data/formats/sample.flac")).unwrap(),
		with_duration
	);
	assert_eq!(
		read(Path::new("test-data/formats/sample.m4a")).unwrap(),
		with_duration
	);
	assert_eq!(
		read(Path::new("test-data/formats/sample.opus")).unwrap(),
		sample_tags
	);
	assert_eq!(
		read(Path::new("test-data/formats/sample.ape")).unwrap(),
		sample_tags
	);
	assert_eq!(
		read(Path::new("test-data/formats/sample.wav")).unwrap(),
		sample_tags
	);
}
#[test]
fn reads_embedded_artwork() {
	// Every format that supports embedded pictures should report artwork.
	let samples = [
		"test-data/artwork/sample.aif",
		"test-data/artwork/sample.mp3",
		"test-data/artwork/sample.flac",
		"test-data/artwork/sample.m4a",
		"test-data/artwork/sample.wav",
	];
	for sample in samples.iter().copied() {
		assert!(read(Path::new(sample)).unwrap().has_artwork);
	}
}

View file

@ -1,186 +1,478 @@
use diesel::dsl::sql;
use diesel::prelude::*;
use diesel::sql_types;
use std::path::{Path, PathBuf};
use std::collections::HashSet;
use super::*;
use crate::db::{self, directories, songs};
use chumsky::{
error::Simple,
prelude::{choice, end, filter, just, none_of, recursive},
text::{int, keyword, whitespace, TextParser},
Parser,
};
use enum_map::Enum;
use serde::{Deserialize, Serialize};
#[derive(thiserror::Error, Debug)]
pub enum QueryError {
#[error(transparent)]
Database(#[from] diesel::result::Error),
#[error(transparent)]
DatabaseConnection(#[from] db::Error),
#[error("Song was not found: `{0}`")]
SongNotFound(PathBuf),
#[error(transparent)]
Vfs(#[from] vfs::Error),
/// Text song fields that can appear in a structured search query.
#[derive(Clone, Copy, Debug, Deserialize, Enum, Eq, Hash, PartialEq, Serialize)]
pub enum TextField {
	Album,
	AlbumArtist,
	Artist,
	Composer,
	Genre,
	Label,
	Lyricist,
	Path,
	Title,
}
// Exposes SQLite's `random()` so it can be used in `ORDER BY` clauses.
sql_function!(
	#[aggregate]
	fn random() -> Integer;
);
impl Index {
	/// Lists the directories and songs one level below `virtual_path`.
	/// An empty path lists the top-level mounted directories.
	pub fn browse<P>(&self, virtual_path: P) -> Result<Vec<CollectionFile>, QueryError>
	where
		P: AsRef<Path>,
	{
		let mut output = Vec::new();
		let vfs = self.vfs_manager.get_vfs()?;
		let mut connection = self.db.connect()?;
		if virtual_path.as_ref().components().count() == 0 {
			// Browse top-level
			let real_directories: Vec<Directory> = directories::table
				.filter(directories::parent.is_null())
				.load(&mut connection)?;
			// Entries that no longer map to a mount point are dropped.
			let virtual_directories = real_directories
				.into_iter()
				.filter_map(|d| d.virtualize(&vfs));
			output.extend(virtual_directories.map(CollectionFile::Directory));
		} else {
			// Browse sub-directory
			let real_path = vfs.virtual_to_real(virtual_path)?;
			let real_path_string = real_path.as_path().to_string_lossy().into_owned();
			// Raw SQL fragment gives case-insensitive alphabetical ordering.
			let real_directories: Vec<Directory> = directories::table
				.filter(directories::parent.eq(&real_path_string))
				.order(sql::<sql_types::Bool>("path COLLATE NOCASE ASC"))
				.load(&mut connection)?;
			let virtual_directories = real_directories
				.into_iter()
				.filter_map(|d| d.virtualize(&vfs));
			output.extend(virtual_directories.map(CollectionFile::Directory));
			let real_songs: Vec<Song> = songs::table
				.filter(songs::parent.eq(&real_path_string))
				.order(sql::<sql_types::Bool>("path COLLATE NOCASE ASC"))
				.load(&mut connection)?;
			let virtual_songs = real_songs.into_iter().filter_map(|s| s.virtualize(&vfs));
			output.extend(virtual_songs.map(CollectionFile::Song));
		}
		Ok(output)
	}
	/// Recursively lists every song under `virtual_path`, ordered by path.
	/// A root path returns the entire collection.
	pub fn flatten<P>(&self, virtual_path: P) -> Result<Vec<Song>, QueryError>
	where
		P: AsRef<Path>,
	{
		use self::songs::dsl::*;
		let vfs = self.vfs_manager.get_vfs()?;
		let mut connection = self.db.connect()?;
		let real_songs: Vec<Song> = if virtual_path.as_ref().parent().is_some() {
			let real_path = vfs.virtual_to_real(virtual_path)?;
			// Match any song whose path starts with this directory; `%` is the
			// SQL LIKE wildcard.
			let song_path_filter = {
				let mut path_buf = real_path;
				path_buf.push("%");
				path_buf.as_path().to_string_lossy().into_owned()
			};
			songs
				.filter(path.like(&song_path_filter))
				.order(path)
				.load(&mut connection)?
		} else {
			songs.order(path).load(&mut connection)?
		};
		let virtual_songs = real_songs.into_iter().filter_map(|s| s.virtualize(&vfs));
		Ok(virtual_songs.collect::<Vec<_>>())
	}
	/// Returns up to `count` albums picked at random.
	pub fn get_random_albums(&self, count: i64) -> Result<Vec<Directory>, QueryError> {
		use self::directories::dsl::*;
		let vfs = self.vfs_manager.get_vfs()?;
		let mut connection = self.db.connect()?;
		// `random()` is the sql_function! binding declared above.
		let real_directories: Vec<Directory> = directories
			.filter(album.is_not_null())
			.limit(count)
			.order(random())
			.load(&mut connection)?;
		let virtual_directories = real_directories
			.into_iter()
			.filter_map(|d| d.virtualize(&vfs));
		Ok(virtual_directories.collect::<Vec<_>>())
	}
	/// Returns up to `count` albums, most recently added first.
	pub fn get_recent_albums(&self, count: i64) -> Result<Vec<Directory>, QueryError> {
		use self::directories::dsl::*;
		let vfs = self.vfs_manager.get_vfs()?;
		let mut connection = self.db.connect()?;
		let real_directories: Vec<Directory> = directories
			.filter(album.is_not_null())
			.order(date_added.desc())
			.limit(count)
			.load(&mut connection)?;
		let virtual_directories = real_directories
			.into_iter()
			.filter_map(|d| d.virtualize(&vfs));
		Ok(virtual_directories.collect::<Vec<_>>())
	}
	/// Searches directories and songs whose metadata contains `query`.
	///
	/// Results whose parent directory also matches are excluded, so a match
	/// is only reported once at its top-most level.
	pub fn search(&self, query: &str) -> Result<Vec<CollectionFile>, QueryError> {
		let vfs = self.vfs_manager.get_vfs()?;
		let mut connection = self.db.connect()?;
		// `%` wildcards make this a substring match in SQL LIKE.
		let like_test = format!("%{}%", query);
		let mut output = Vec::new();
		// Find dirs with matching path and parent not matching
		{
			use self::directories::dsl::*;
			let real_directories: Vec<Directory> = directories
				.filter(path.like(&like_test))
				.filter(parent.not_like(&like_test))
				.load(&mut connection)?;
			let virtual_directories = real_directories
				.into_iter()
				.filter_map(|d| d.virtualize(&vfs));
			output.extend(virtual_directories.map(CollectionFile::Directory));
		}
		// Find songs with matching title/album/artist and non-matching parent
		{
			use self::songs::dsl::*;
			let real_songs: Vec<Song> = songs
				.filter(
					path.like(&like_test)
						.or(title.like(&like_test))
						.or(album.like(&like_test))
						.or(artist.like(&like_test))
						.or(album_artist.like(&like_test)),
				)
				.filter(parent.not_like(&like_test))
				.load(&mut connection)?;
			let virtual_songs = real_songs.into_iter().filter_map(|d| d.virtualize(&vfs));
			output.extend(virtual_songs.map(CollectionFile::Song));
		}
		Ok(output)
	}
pub fn get_song(&self, virtual_path: &Path) -> Result<Song, QueryError> {
let vfs = self.vfs_manager.get_vfs()?;
let mut connection = self.db.connect()?;
let real_path = vfs.virtual_to_real(virtual_path)?;
let real_path_string = real_path.as_path().to_string_lossy();
use self::songs::dsl::*;
let real_song: Song = songs
.filter(path.eq(real_path_string))
.get_result(&mut connection)?;
match real_song.virtualize(&vfs) {
Some(s) => Ok(s),
None => Err(QueryError::SongNotFound(real_path)),
}
}
/// Comparison operators available on text fields.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum TextOp {
	Eq,
	Like,
}
/// Numeric song fields that can appear in a structured search query.
#[derive(Clone, Copy, Debug, Deserialize, Enum, Eq, Hash, PartialEq, Serialize)]
pub enum NumberField {
	DiscNumber,
	TrackNumber,
	Year,
}
/// Comparison operators available on numeric fields.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum NumberOp {
	Eq,
	Greater,
	GreaterOrEq,
	Less,
	LessOrEq,
}
/// A bare literal used in a fuzzy (field-less) search term.
#[derive(Debug, Eq, PartialEq)]
pub enum Literal {
	Text(String),
	Number(i32),
}
/// Boolean connectives between search terms.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum BoolOp {
	And,
	Or,
	Not,
}
/// Parsed representation of a search query.
#[derive(Debug, Eq, PartialEq)]
pub enum Expr {
	Fuzzy(Literal),
	TextCmp(TextField, TextOp, String),
	NumberCmp(NumberField, NumberOp, i32),
	Combined(Box<Expr>, BoolOp, Box<Expr>),
}
/// Builds the parser for the search query language.
///
/// Grammar overview:
/// - bare or quoted strings and integers are fuzzy terms;
/// - `field = value` / `field % value` are exact / substring text filters;
/// - `field <op> number` with `=`, `>`, `>=`, `<`, `<=` are numeric filters;
/// - terms combine with `&&`, `||`, `!!` (equal precedence, left-associative)
///   or by juxtaposition (implicit `&&`), with parentheses for grouping.
pub fn make_parser() -> impl Parser<char, Expr, Error = Simple<char>> {
	recursive(|expr| {
		// A double-quoted string; may contain whitespace and reserved symbols.
		let quoted_str = just('"')
			.ignore_then(none_of('"').repeated().collect::<String>())
			.then_ignore(just('"'));
		// A bare string ends at whitespace or any reserved symbol.
		let symbols = r#"()<>"|&=!"#.chars().collect::<HashSet<_>>();
		let raw_str = filter(move |c: &char| !c.is_whitespace() && !symbols.contains(c))
			.repeated()
			.at_least(1)
			.collect::<String>();
		let str_ = choice((quoted_str, raw_str)).padded();
		let number = int(10).from_str().unwrapped().padded();
		let text_field = choice((
			keyword("album").to(TextField::Album),
			keyword("albumartist").to(TextField::AlbumArtist),
			keyword("artist").to(TextField::Artist),
			keyword("composer").to(TextField::Composer),
			keyword("genre").to(TextField::Genre),
			keyword("label").to(TextField::Label),
			keyword("lyricist").to(TextField::Lyricist),
			keyword("path").to(TextField::Path),
			keyword("title").to(TextField::Title),
		))
		.padded();
		let text_op = choice((just("=").to(TextOp::Eq), just("%").to(TextOp::Like))).padded();
		let text_cmp = text_field
			.then(text_op)
			.then(str_.clone())
			.map(|((a, b), c)| Expr::TextCmp(a, b, c));
		let number_field = choice((
			keyword("discnumber").to(NumberField::DiscNumber),
			keyword("tracknumber").to(NumberField::TrackNumber),
			keyword("year").to(NumberField::Year),
		))
		.padded();
		// `>=` and `<=` are listed before `>` and `<` so they are not split
		// into two tokens.
		let number_op = choice((
			just("=").to(NumberOp::Eq),
			just(">=").to(NumberOp::GreaterOrEq),
			just(">").to(NumberOp::Greater),
			just("<=").to(NumberOp::LessOrEq),
			just("<").to(NumberOp::Less),
		))
		.padded();
		let number_cmp = number_field
			.then(number_op)
			.then(number)
			.map(|((a, b), c)| Expr::NumberCmp(a, b, c));
		let literal = choice((number.map(Literal::Number), str_.map(Literal::Text)));
		let fuzzy = literal.map(Expr::Fuzzy);
		let filter = choice((text_cmp, number_cmp, fuzzy));
		let atom = choice((filter, expr.delimited_by(just('('), just(')'))));
		let bool_op = choice((
			just("&&").to(BoolOp::And),
			just("||").to(BoolOp::Or),
			just("!!").to(BoolOp::Not),
		))
		.padded();
		// Explicit boolean operators fold left-to-right at equal precedence.
		let combined = atom
			.clone()
			.then(bool_op.then(atom).repeated())
			.foldl(|a, (b, c)| Expr::Combined(Box::new(a), b, Box::new(c)));
		// Adjacent terms with no operator between them are AND-ed together.
		let implicit_and = combined
			.clone()
			.then(whitespace().ignore_then(combined).repeated())
			.foldl(|a: Expr, b: Expr| Expr::Combined(Box::new(a), BoolOp::And, Box::new(b)));
		implicit_and
	})
	.then_ignore(end())
}
#[test]
fn can_parse_fuzzy_query() {
	let parser = make_parser();
	// A bare word parses as a fuzzy text term.
	let text_query = parser.parse(r#"rhapsody"#).unwrap();
	assert_eq!(text_query, Expr::Fuzzy(Literal::Text("rhapsody".to_owned())));
	// A bare integer parses as a fuzzy number term.
	let number_query = parser.parse(r#"2005"#).unwrap();
	assert_eq!(number_query, Expr::Fuzzy(Literal::Number(2005)));
}
#[test]
fn can_repeat_fuzzy_queries() {
	let parser = make_parser();
	// Adjacent fuzzy terms are implicitly AND-ed together.
	let fuzzy = |s: &str| Box::new(Expr::Fuzzy(Literal::Text(s.to_owned())));
	let expected = Expr::Combined(fuzzy("rhapsody"), BoolOp::And, fuzzy("of victory"));
	assert_eq!(parser.parse(r#"rhapsody "of victory""#).unwrap(), expected);
}
#[test]
fn can_mix_fuzzy_and_structured() {
	let parser = make_parser();
	// Fuzzy and structured terms can be freely combined in one query.
	let expected = Expr::Combined(
		Box::new(Expr::Fuzzy(Literal::Text("rhapsody".to_owned()))),
		BoolOp::And,
		Box::new(Expr::TextCmp(
			TextField::Album,
			TextOp::Like,
			"dragonflame".to_owned(),
		)),
	);
	assert_eq!(
		parser.parse(r#"rhapsody album % dragonflame"#).unwrap(),
		expected
	);
}
#[test]
fn can_parse_text_fields() {
	let parser = make_parser();
	// Every text field keyword should parse with the `=` operator.
	let cases = [
		(r#"album = "legendary tales""#, TextField::Album, "legendary tales"),
		(r#"albumartist = "rhapsody""#, TextField::AlbumArtist, "rhapsody"),
		(r#"artist = "rhapsody""#, TextField::Artist, "rhapsody"),
		(r#"composer = "yoko kanno""#, TextField::Composer, "yoko kanno"),
		(r#"genre = "jazz""#, TextField::Genre, "jazz"),
		(r#"label = "diverse system""#, TextField::Label, "diverse system"),
		(r#"lyricist = "dalida""#, TextField::Lyricist, "dalida"),
		(
			r#"path = "electronic/big beat""#,
			TextField::Path,
			"electronic/big beat",
		),
		(r#"title = "emerald sword""#, TextField::Title, "emerald sword"),
	];
	for (query, field, value) in cases {
		assert_eq!(
			parser.parse(query).unwrap(),
			Expr::TextCmp(field, TextOp::Eq, value.to_owned()),
		);
	}
}
#[test]
fn can_parse_text_operators() {
	let parser = make_parser();
	// `=` is an exact match, `%` is a substring (like) match.
	for (symbol, op) in [("=", TextOp::Eq), ("%", TextOp::Like)] {
		let query = format!(r#"album {} "legendary tales""#, symbol);
		assert_eq!(
			parser.parse(query.as_str()).unwrap(),
			Expr::TextCmp(TextField::Album, op, "legendary tales".to_owned()),
		);
	}
}
#[test]
fn can_parse_number_fields() {
	let parser = make_parser();
	// Every numeric field keyword should parse with the `=` operator.
	let cases = [
		(r#"discnumber = 6"#, NumberField::DiscNumber, 6),
		(r#"tracknumber = 12"#, NumberField::TrackNumber, 12),
		(r#"year = 1999"#, NumberField::Year, 1999),
	];
	for (query, field, value) in cases {
		assert_eq!(
			parser.parse(query).unwrap(),
			Expr::NumberCmp(field, NumberOp::Eq, value),
		);
	}
}
#[test]
fn can_parse_number_operators() {
	let parser = make_parser();
	// Each comparison symbol should map to its operator.
	let cases = [
		("=", NumberOp::Eq),
		(">", NumberOp::Greater),
		(">=", NumberOp::GreaterOrEq),
		("<", NumberOp::Less),
		("<=", NumberOp::LessOrEq),
	];
	for (symbol, op) in cases {
		let query = format!("discnumber {} 6", symbol);
		assert_eq!(
			parser.parse(query.as_str()).unwrap(),
			Expr::NumberCmp(NumberField::DiscNumber, op, 6),
		);
	}
}
#[test]
fn can_use_and_operator() {
	let parser = make_parser();
	let like = |field, value: &str| Expr::TextCmp(field, TextOp::Like, value.to_owned());
	let expected = Expr::Combined(
		Box::new(like(TextField::Album, "lands")),
		BoolOp::And,
		Box::new(like(TextField::Title, "sword")),
	);
	assert_eq!(
		parser.parse(r#"album % lands && title % "sword""#).unwrap(),
		expected
	);
}
#[test]
fn can_use_or_operator() {
	let parser = make_parser();
	let like = |field, value: &str| Expr::TextCmp(field, TextOp::Like, value.to_owned());
	let expected = Expr::Combined(
		Box::new(like(TextField::Album, "lands")),
		BoolOp::Or,
		Box::new(like(TextField::Title, "sword")),
	);
	assert_eq!(
		parser.parse(r#"album % lands || title % "sword""#).unwrap(),
		expected
	);
}
#[test]
fn can_use_not_operator() {
	let parser = make_parser();
	let like = |field, value: &str| Expr::TextCmp(field, TextOp::Like, value.to_owned());
	let expected = Expr::Combined(
		Box::new(like(TextField::Album, "lands")),
		BoolOp::Not,
		Box::new(like(TextField::Title, "sword")),
	);
	assert_eq!(
		parser.parse(r#"album % lands !! title % "sword""#).unwrap(),
		expected
	);
}
#[test]
fn boolean_operators_share_precedence() {
	let parser = make_parser();
	let like = |field, value: &str| Expr::TextCmp(field, TextOp::Like, value.to_owned());
	let combine = |a, op, b| Expr::Combined(Box::new(a), op, Box::new(b));
	// Operators of equal precedence associate left to right, regardless of
	// which operator comes first.
	assert_eq!(
		parser
			.parse(r#"album % lands || album % tales && title % "sword""#)
			.unwrap(),
		combine(
			combine(
				like(TextField::Album, "lands"),
				BoolOp::Or,
				like(TextField::Album, "tales"),
			),
			BoolOp::And,
			like(TextField::Title, "sword"),
		),
	);
	assert_eq!(
		parser
			.parse(r#"album % lands && album % tales || title % "sword""#)
			.unwrap(),
		combine(
			combine(
				like(TextField::Album, "lands"),
				BoolOp::And,
				like(TextField::Album, "tales"),
			),
			BoolOp::Or,
			like(TextField::Title, "sword"),
		),
	);
}
#[test]
fn can_use_parenthesis_for_precedence() {
	let parser = make_parser();
	let like = |field, value: &str| Expr::TextCmp(field, TextOp::Like, value.to_owned());
	let combine = |a, op, b| Expr::Combined(Box::new(a), op, Box::new(b));
	// Parentheses override the default left-to-right grouping.
	assert_eq!(
		parser
			.parse(r#"album % lands || (album % tales && title % sword)"#)
			.unwrap(),
		combine(
			like(TextField::Album, "lands"),
			BoolOp::Or,
			combine(
				like(TextField::Album, "tales"),
				BoolOp::And,
				like(TextField::Title, "sword"),
			),
		),
	);
	assert_eq!(
		parser
			.parse(r#"(album % lands || album % tales) && title % "sword""#)
			.unwrap(),
		combine(
			combine(
				like(TextField::Album, "lands"),
				BoolOp::Or,
				like(TextField::Album, "tales"),
			),
			BoolOp::And,
			like(TextField::Title, "sword"),
		),
	);
}

708
src/app/index/search.rs Normal file
View file

@ -0,0 +1,708 @@
use chumsky::Parser;
use enum_map::EnumMap;
use lasso2::Spur;
use nohash_hasher::IntSet;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use tinyvec::TinyVec;
use crate::app::{
index::{
dictionary::Dictionary,
query::{BoolOp, Expr, Literal, NumberField, NumberOp, TextField, TextOp},
storage::SongKey,
},
scanner, Error,
};
use super::{collection, dictionary::sanitize, query::make_parser, storage};
/// Search index covering every queryable text and numeric song field.
#[derive(Serialize, Deserialize)]
pub struct Search {
	text_fields: EnumMap<TextField, TextFieldIndex>,
	number_fields: EnumMap<NumberField, NumberFieldIndex>,
}
impl Default for Search {
	// An empty index; real content is produced via `Builder::build`.
	fn default() -> Self {
		Self {
			text_fields: Default::default(),
			number_fields: Default::default(),
		}
	}
}
impl Search {
	/// Runs a search query against the index and returns matching songs,
	/// sorted with the collection's ordering rules.
	pub fn find_songs(
		&self,
		collection: &collection::Collection,
		dictionary: &Dictionary,
		query: &str,
	) -> Result<Vec<collection::Song>, Error> {
		let parser = make_parser();
		let parsed_query = parser
			.parse(query)
			.map_err(|_| Error::SearchQueryParseError)?;
		let mut songs = self
			.eval(dictionary, &parsed_query)
			.into_iter()
			.collect::<Vec<_>>();
		collection.sort_songs(&mut songs, dictionary);
		// Keys that no longer resolve to a song are silently dropped.
		let songs = songs
			.into_iter()
			.filter_map(|song_key| collection.get_song(dictionary, song_key))
			.collect::<Vec<_>>();
		Ok(songs)
	}
fn eval(&self, dictionary: &Dictionary, expr: &Expr) -> IntSet<SongKey> {
match expr {
Expr::Fuzzy(s) => self.eval_fuzzy(dictionary, s),
Expr::TextCmp(field, op, s) => self.eval_text_operator(dictionary, *field, *op, &s),
Expr::NumberCmp(field, op, n) => self.eval_number_operator(*field, *op, *n),
Expr::Combined(e, op, f) => self.combine(dictionary, e, *op, f),
}
}
fn combine(
&self,
dictionary: &Dictionary,
e: &Box<Expr>,
op: BoolOp,
f: &Box<Expr>,
) -> IntSet<SongKey> {
let is_operable = |expr: &Expr| match expr {
Expr::Fuzzy(Literal::Text(s)) if s.chars().count() < BIGRAM_SIZE => false,
Expr::Fuzzy(Literal::Number(n)) if *n < 10 => false,
Expr::TextCmp(_, _, s) if s.chars().count() < BIGRAM_SIZE => false,
_ => true,
};
let left = is_operable(e).then(|| self.eval(dictionary, e));
let right = is_operable(f).then(|| self.eval(dictionary, f));
match (left, op, right) {
(Some(l), BoolOp::And, Some(r)) => l.intersection(&r).cloned().collect(),
(Some(l), BoolOp::Or, Some(r)) => l.union(&r).cloned().collect(),
(Some(l), BoolOp::Not, Some(r)) => l.difference(&r).cloned().collect(),
(None, BoolOp::Not, _) => IntSet::default(),
(Some(l), _, None) => l,
(None, _, Some(r)) => r,
(None, _, None) => IntSet::default(),
}
}
	/// Evaluates a field-less fuzzy term against every indexed field.
	fn eval_fuzzy(&self, dictionary: &Dictionary, value: &Literal) -> IntSet<SongKey> {
		match value {
			Literal::Text(s) => {
				let mut songs = IntSet::default();
				for field in self.text_fields.values() {
					songs.extend(field.find_like(dictionary, s));
				}
				songs
			}
			Literal::Number(n) => {
				// Match the number against every numeric field, and also search
				// for its decimal spelling in text fields.
				let mut songs = IntSet::default();
				for field in self.number_fields.values() {
					songs.extend(field.find(*n as i64, NumberOp::Eq));
				}
				songs
					.union(&self.eval_fuzzy(dictionary, &Literal::Text(n.to_string())))
					.copied()
					.collect()
			}
		}
	}
fn eval_text_operator(
&self,
dictionary: &Dictionary,
field: TextField,
operator: TextOp,
value: &str,
) -> IntSet<SongKey> {
match operator {
TextOp::Eq => self.text_fields[field].find_exact(dictionary, value),
TextOp::Like => self.text_fields[field].find_like(dictionary, value),
}
}
	/// Evaluates a numeric comparison against a single field index.
	fn eval_number_operator(
		&self,
		field: NumberField,
		operator: NumberOp,
		value: i32,
	) -> IntSet<SongKey> {
		self.number_fields[field].find(value as i64, operator)
	}
}
// Indexed text is broken into overlapping pairs of characters (bigrams).
const BIGRAM_SIZE: usize = 2;
// Side length of the flat ASCII bigram table (u8::MAX = 255 slots per axis).
const ASCII_RANGE: usize = u8::MAX as usize;
/// Inverted index for one text field, supporting exact and substring lookups.
#[derive(Clone, Deserialize, Serialize)]
struct TextFieldIndex {
	// Maps an interned canonical value to the songs bearing it.
	exact: HashMap<Spur, IntSet<SongKey>>,
	// Flat table indexed by ASCII character pair; each slot lists the songs
	// whose field value contains that bigram, along with the full value.
	ascii_bigrams: Vec<Vec<(SongKey, Spur)>>,
	// Fallback map for bigrams containing non-ASCII characters.
	other_bigrams: HashMap<[char; BIGRAM_SIZE], Vec<(SongKey, Spur)>>,
}
impl Default for TextFieldIndex {
	// Manual impl: the ascii_bigrams table must be pre-sized, which
	// `#[derive(Default)]` cannot express.
	fn default() -> Self {
		Self {
			exact: Default::default(),
			ascii_bigrams: vec![Default::default(); ASCII_RANGE * ASCII_RANGE],
			other_bigrams: Default::default(),
		}
	}
}
impl TextFieldIndex {
fn ascii_bigram_to_index(a: char, b: char) -> usize {
assert!(a.is_ascii());
assert!(b.is_ascii());
(a as usize) * ASCII_RANGE + (b as usize) as usize
}
	/// Indexes `raw_value` (whose interned form is `value`) for `song`.
	pub fn insert(&mut self, raw_value: &str, value: Spur, song: SongKey) {
		// Sanitize, then index every overlapping character pair.
		let characters = sanitize(raw_value).chars().collect::<TinyVec<[char; 32]>>();
		for substring in characters[..].windows(BIGRAM_SIZE) {
			if substring.iter().all(|c| c.is_ascii()) {
				let index = Self::ascii_bigram_to_index(substring[0], substring[1]);
				self.ascii_bigrams[index].push((song, value));
			} else {
				self.other_bigrams
					.entry(substring.try_into().unwrap())
					.or_default()
					.push((song, value));
			}
		}
		self.exact.entry(value).or_default().insert(song);
	}
	/// Returns the songs whose field value contains `value` as a substring
	/// (after sanitization).
	pub fn find_like(&self, dictionary: &Dictionary, value: &str) -> IntSet<SongKey> {
		let sanitized = sanitize(value);
		let characters = sanitized.chars().collect::<Vec<_>>();
		let empty = Vec::new();
		// Gather the candidate lists for each bigram of the search term.
		let candidates_by_bigram = characters[..]
			.windows(BIGRAM_SIZE)
			.map(|s| {
				if s.iter().all(|c| c.is_ascii()) {
					let index = Self::ascii_bigram_to_index(s[0], s[1]);
					&self.ascii_bigrams[index]
				} else {
					self.other_bigrams
						.get::<[char; BIGRAM_SIZE]>(s.try_into().unwrap())
						.unwrap_or(&empty)
				}
			})
			.collect::<Vec<_>>();
		candidates_by_bigram
			.into_iter()
			.min_by_key(|h| h.len()) // Only check songs that contain the least common bigram from the search term
			.unwrap_or(&empty)
			.iter()
			.filter(|(_song_key, indexed_value)| {
				// Only keep songs that actually contain the search term in full
				let resolved = dictionary.resolve(indexed_value);
				sanitize(resolved).contains(&sanitized)
			})
			.map(|(k, _v)| k)
			.copied()
			.collect()
	}
pub fn find_exact(&self, dictionary: &Dictionary, value: &str) -> IntSet<SongKey> {
dictionary
.get_canon(value)
.and_then(|s| self.exact.get(&s))
.cloned()
.unwrap_or_default()
}
}
/// Index for one numeric field; the sorted map enables range queries.
#[derive(Clone, Default, Deserialize, Serialize)]
struct NumberFieldIndex {
	values: BTreeMap<i64, IntSet<SongKey>>,
}
impl NumberFieldIndex {
	/// Records that `key` has this field set to `value`.
	pub fn insert(&mut self, value: i64, key: SongKey) {
		self.values.entry(value).or_default().insert(key);
	}
pub fn find(&self, value: i64, operator: NumberOp) -> IntSet<SongKey> {
let range = match operator {
NumberOp::Eq => self.values.range(value..=value),
NumberOp::Greater => self.values.range((value + 1)..),
NumberOp::GreaterOrEq => self.values.range(value..),
NumberOp::Less => self.values.range(..value),
NumberOp::LessOrEq => self.values.range(..=value),
};
let candidates = range.map(|(_n, songs)| songs).collect::<Vec<_>>();
let mut results = Vec::with_capacity(candidates.iter().map(|c| c.len()).sum());
candidates
.into_iter()
.for_each(|songs| results.extend(songs.iter()));
IntSet::from_iter(results)
}
}
/// Accumulates per-song index data before producing a finished `Search`.
#[derive(Clone, Default)]
pub struct Builder {
	text_fields: EnumMap<TextField, TextFieldIndex>,
	number_fields: EnumMap<NumberField, NumberFieldIndex>,
}
impl Builder {
	/// Indexes one song across all searchable fields.
	///
	/// `scanner_song` holds the raw strings while `storage_song` holds the
	/// matching interned keys; multi-valued fields are zipped pairwise.
	pub fn add_song(&mut self, scanner_song: &scanner::Song, storage_song: &storage::Song) {
		let song_key = SongKey {
			virtual_path: storage_song.virtual_path,
		};
		if let (Some(str), Some(spur)) = (&scanner_song.album, storage_song.album) {
			self.text_fields[TextField::Album].insert(str, spur, song_key);
		}
		for (str, artist_key) in scanner_song
			.album_artists
			.iter()
			.zip(storage_song.album_artists.iter())
		{
			self.text_fields[TextField::AlbumArtist].insert(str, artist_key.0, song_key);
		}
		for (str, artist_key) in scanner_song.artists.iter().zip(storage_song.artists.iter()) {
			self.text_fields[TextField::Artist].insert(str, artist_key.0, song_key);
		}
		for (str, artist_key) in scanner_song
			.composers
			.iter()
			.zip(storage_song.composers.iter())
		{
			self.text_fields[TextField::Composer].insert(str, artist_key.0, song_key);
		}
		if let Some(disc_number) = &scanner_song.disc_number {
			self.number_fields[NumberField::DiscNumber].insert(*disc_number, song_key);
		}
		for (str, spur) in scanner_song.genres.iter().zip(storage_song.genres.iter()) {
			self.text_fields[TextField::Genre].insert(str, *spur, song_key);
		}
		for (str, spur) in scanner_song.labels.iter().zip(storage_song.labels.iter()) {
			self.text_fields[TextField::Label].insert(str, *spur, song_key);
		}
		for (str, artist_key) in scanner_song
			.lyricists
			.iter()
			.zip(storage_song.lyricists.iter())
		{
			self.text_fields[TextField::Lyricist].insert(str, artist_key.0, song_key);
		}
		// Paths are always present and indexed like any other text field.
		self.text_fields[TextField::Path].insert(
			scanner_song.virtual_path.to_string_lossy().as_ref(),
			storage_song.virtual_path.0,
			song_key,
		);
		if let (Some(str), Some(spur)) = (&scanner_song.title, storage_song.title) {
			self.text_fields[TextField::Title].insert(str, spur, song_key);
		}
		if let Some(track_number) = &scanner_song.track_number {
			self.number_fields[NumberField::TrackNumber].insert(*track_number, song_key);
		}
		if let Some(year) = &scanner_song.year {
			self.number_fields[NumberField::Year].insert(*year, song_key);
		}
	}
pub fn build(self) -> Search {
Search {
text_fields: self.text_fields,
number_fields: self.number_fields,
}
}
}
#[cfg(test)]
mod test {
	use std::path::PathBuf;

	use super::*;
	use crate::app::index::dictionary;
	use collection::Collection;
	use storage::store_song;

	// Bundles the structures needed to run a search end-to-end in tests.
	struct Context {
		dictionary: Dictionary,
		collection: Collection,
		search: Search,
	}

	impl Context {
		// Runs `query` and returns the virtual paths of the matching songs,
		// preserving the order produced by `find_songs`.
		pub fn search(&self, query: &str) -> Vec<PathBuf> {
			self.search
				.find_songs(&self.collection, &self.dictionary, query)
				.unwrap()
				.into_iter()
				.map(|s| s.virtual_path)
				.collect()
		}
	}

	// Builds a dictionary, collection and search index from the given songs.
	fn setup_test(songs: Vec<scanner::Song>) -> Context {
		let mut dictionary_builder = dictionary::Builder::default();
		let mut collection_builder = collection::Builder::default();
		let mut search_builder = Builder::default();

		for song in songs {
			let storage_song = store_song(&mut dictionary_builder, &song).unwrap();
			collection_builder.add_song(&storage_song);
			search_builder.add_song(&song, &storage_song);
		}

		Context {
			collection: collection_builder.build(),
			search: search_builder.build(),
			dictionary: dictionary_builder.build(),
		}
	}

	// A bare term should match substrings of any text field.
	#[test]
	fn can_find_fuzzy() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("seasons.mp3"),
				title: Some("Seasons".to_owned()),
				artists: vec!["Dragonforce".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("potd.mp3"),
				title: Some("Power of the Dragonflame".to_owned()),
				artists: vec!["Rhapsody".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("calcium.mp3"),
				title: Some("Calcium".to_owned()),
				artists: vec!["FSOL".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("agon");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
		assert!(songs.contains(&PathBuf::from("potd.mp3")));
	}

	// `field % term` restricts the substring match to one field.
	#[test]
	fn can_find_field_like() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("seasons.mp3"),
				title: Some("Seasons".to_owned()),
				artists: vec!["Dragonforce".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("potd.mp3"),
				title: Some("Power of the Dragonflame".to_owned()),
				artists: vec!["Rhapsody".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("artist % agon");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
	}

	// Both fuzzy and exact text queries should ignore letter case.
	#[test]
	fn text_is_case_insensitive() {
		let ctx = setup_test(vec![scanner::Song {
			virtual_path: PathBuf::from("seasons.mp3"),
			artists: vec!["Dragonforce".to_owned()],
			..Default::default()
		}]);

		let songs = ctx.search("dragonforce");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));

		let songs = ctx.search("artist = dragonforce");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
	}

	// `field = term` requires the full value to match, not a prefix.
	#[test]
	fn can_find_field_exact() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("seasons.mp3"),
				title: Some("Seasons".to_owned()),
				artists: vec!["Dragonforce".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("potd.mp3"),
				title: Some("Power of the Dragonflame".to_owned()),
				artists: vec!["Rhapsody".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("artist = Dragon");
		assert!(songs.is_empty());

		let songs = ctx.search("artist = Dragonforce");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
	}

	// Exercises =, >, <, >= and <= against a numeric field.
	#[test]
	fn can_query_number_fields() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("1999.mp3"),
				year: Some(1999),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("2000.mp3"),
				year: Some(2000),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("2001.mp3"),
				year: Some(2001),
				..Default::default()
			},
		]);

		let songs = ctx.search("year=2000");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("2000.mp3")));

		let songs = ctx.search("year>2000");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("2001.mp3")));

		let songs = ctx.search("year<2000");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("1999.mp3")));

		let songs = ctx.search("year>=2000");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("2000.mp3")));
		assert!(songs.contains(&PathBuf::from("2001.mp3")));

		let songs = ctx.search("year<=2000");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("1999.mp3")));
		assert!(songs.contains(&PathBuf::from("2000.mp3")));
	}

	// A bare numeric term should match both number fields and text fields.
	#[test]
	fn fuzzy_numbers_query_all_fields() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("music.mp3"),
				year: Some(2000),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("fireworks 2000.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("calcium.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("2000");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("music.mp3")));
		assert!(songs.contains(&PathBuf::from("fireworks 2000.mp3")));
	}

	// `&&` is the explicit AND operator; juxtaposition is an implicit AND.
	#[test]
	fn can_use_and_operator() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("whale.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("space.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("whales in space.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("space && whale");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("whales in space.mp3")));

		let songs = ctx.search("space whale");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("whales in space.mp3")));
	}

	// `||` unions the results of both sides.
	#[test]
	fn can_use_or_operator() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("whale.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("space.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("whales in space.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("space || whale");
		assert_eq!(songs.len(), 3);
		assert!(songs.contains(&PathBuf::from("whale.mp3")));
		assert!(songs.contains(&PathBuf::from("space.mp3")));
		assert!(songs.contains(&PathBuf::from("whales in space.mp3")));
	}

	// `!!` subtracts the right-hand side results from the left-hand side.
	#[test]
	fn can_use_not_operator() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("whale.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("space.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("whales in space.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("whale !! space");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("whale.mp3")));
	}

	// Pins the full output ordering for a multi-result query.
	#[test]
	fn results_are_sorted() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("accented.mp3"),
				artists: vec!["à la maison".to_owned()],
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("cry thunder.mp3"),
				artists: vec!["Dragonforce".to_owned()],
				album: Some("The Power Within".to_owned()),
				year: Some(2012),
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("revelations.mp3"),
				artists: vec!["Dragonforce".to_owned()],
				album: Some("Valley of the Damned".to_owned()),
				year: Some(2003),
				track_number: Some(7),
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("starfire.mp3"),
				artists: vec!["Dragonforce".to_owned()],
				album: Some("Valley of the Damned".to_owned()),
				year: Some(2003),
				track_number: Some(5),
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("eternal snow.mp3"),
				artists: vec!["Rhapsody".to_owned()],
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("alchemy.mp3"),
				artists: vec!["Avantasia".to_owned()],
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("metal");
		assert_eq!(songs.len(), 6);
		assert_eq!(
			songs,
			vec![
				PathBuf::from("accented.mp3"),
				PathBuf::from("alchemy.mp3"),
				PathBuf::from("starfire.mp3"),
				PathBuf::from("revelations.mp3"),
				PathBuf::from("cry thunder.mp3"),
				PathBuf::from("eternal snow.mp3"),
			]
		);
	}

	// "love" shares bigrams with "lorry bovine vehicle" but is not a
	// substring of it; the final containment check must reject it.
	#[test]
	fn avoids_bigram_false_positives() {
		let ctx = setup_test(vec![scanner::Song {
			virtual_path: PathBuf::from("lorry bovine vehicle.mp3"),
			..Default::default()
		}]);

		let songs = ctx.search("love");
		assert!(songs.is_empty());
	}

	// Single-character terms are too short to form a bigram and should not
	// filter out otherwise-matching results.
	#[test]
	fn ignores_single_letter_components() {
		let ctx = setup_test(vec![scanner::Song {
			virtual_path: PathBuf::from("seasons.mp3"),
			..Default::default()
		}]);

		let songs = ctx.search("seas u");
		assert_eq!(songs.len(), 1);

		let songs = ctx.search("seas 2");
		assert_eq!(songs.len(), 1);

		let songs = ctx.search("seas || u");
		assert_eq!(songs.len(), 1);

		let songs = ctx.search("seas || 2");
		assert_eq!(songs.len(), 1);
	}
}

256
src/app/index/storage.rs Normal file
View file

@ -0,0 +1,256 @@
use std::{
collections::{HashMap, HashSet},
path::{Path, PathBuf},
};
use lasso2::Spur;
use log::error;
use serde::{Deserialize, Serialize};
use tinyvec::TinyVec;
use crate::app::scanner;
use crate::app::index::dictionary::{self, Dictionary};
/// A browsable filesystem entry, referenced by its interned virtual path.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum File {
	Directory(PathKey),
	Song(PathKey),
}

/// Aggregated view of one genre across the whole collection.
#[derive(Clone, Serialize, Deserialize)]
pub struct Genre {
	pub name: Spur,
	pub albums: HashSet<AlbumKey>,
	pub artists: HashSet<ArtistKey>,
	// Co-occurrence counts with other genres (genre -> number of shared songs,
	// presumably — TODO confirm against the code populating this map).
	pub related_genres: HashMap<GenreKey, u32>,
	pub songs: Vec<SongKey>,
}

/// Aggregated view of one artist, broken down by the role they played on
/// each album they appear on.
#[derive(Clone, Serialize, Deserialize)]
pub struct Artist {
	pub name: Spur,
	pub all_albums: HashSet<AlbumKey>,
	pub albums_as_performer: HashSet<AlbumKey>,
	pub albums_as_additional_performer: HashSet<AlbumKey>,
	pub albums_as_composer: HashSet<AlbumKey>,
	pub albums_as_lyricist: HashSet<AlbumKey>,
	pub num_songs_by_genre: HashMap<Spur, u32>,
	pub num_songs: u32,
}

/// Aggregated view of one album and the songs it contains.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Album {
	pub name: Spur,
	pub artwork: Option<PathKey>,
	pub artists: TinyVec<[ArtistKey; 1]>,
	pub year: Option<i64>,
	pub date_added: i64,
	pub songs: HashSet<SongKey>,
}
/// Interned form of a scanned song: every string is a dictionary `Spur`
/// (or a key newtype around one), which keeps the in-memory index compact.
/// `fetch_song` converts back to the owned-string representation.
#[derive(Clone, Serialize, Deserialize)]
pub struct Song {
	pub real_path: PathKey,
	pub virtual_path: PathKey,
	pub track_number: Option<i64>,
	pub disc_number: Option<i64>,
	pub title: Option<Spur>,
	pub artists: TinyVec<[ArtistKey; 1]>,
	pub album_artists: TinyVec<[ArtistKey; 1]>,
	pub year: Option<i64>,
	pub album: Option<Spur>,
	pub artwork: Option<PathKey>,
	pub duration: Option<i64>,
	pub lyricists: TinyVec<[ArtistKey; 0]>,
	pub composers: TinyVec<[ArtistKey; 0]>,
	pub genres: TinyVec<[Spur; 1]>,
	pub labels: TinyVec<[Spur; 0]>,
	pub date_added: i64,
}
// Newtype keys around interned strings. They make it a type error to mix
// up, say, an artist key and a genre key even though both wrap a `Spur`.

/// Interned filesystem path (real or virtual).
#[derive(
	Copy, Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize,
)]
pub struct PathKey(pub Spur);

/// Interned genre name.
#[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct GenreKey(pub Spur);

/// Interned artist name.
#[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct ArtistKey(pub Spur);

/// An album is identified by its name plus its main artists, so two albums
/// with the same title by different artists stay distinct.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AlbumKey {
	pub artists: TinyVec<[ArtistKey; 4]>,
	pub name: Spur,
}

/// A song is identified by its virtual path.
#[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct SongKey {
	pub virtual_path: PathKey,
}

// Opts SongKey into nohash-hasher's pass-through hashing, used by IntSet.
impl nohash_hasher::IsEnabled for SongKey {}
impl Song {
	/// Computes the key of the album this song belongs to, if any.
	///
	/// Album artists take precedence over track artists when both are
	/// present. Returns `None` when the song has no album name or no
	/// artists at all, since both are needed to identify an album.
	pub fn album_key(&self) -> Option<AlbumKey> {
		let main_artists = if self.album_artists.is_empty() {
			&self.artists
		} else {
			&self.album_artists
		};

		if main_artists.is_empty() {
			return None;
		}

		self.album.map(|name| AlbumKey {
			artists: main_artists.iter().copied().collect(),
			name,
		})
	}
}
pub fn store_song(
dictionary_builder: &mut dictionary::Builder,
song: &scanner::Song,
) -> Option<Song> {
let Some(real_path) = (&song.real_path).get_or_intern(dictionary_builder) else {
return None;
};
let Some(virtual_path) = (&song.virtual_path).get_or_intern(dictionary_builder) else {
return None;
};
let artwork = match &song.artwork {
Some(a) => match a.get_or_intern(dictionary_builder) {
Some(a) => Some(a),
None => return None,
},
None => None,
};
let mut canonicalize = |s: &String| dictionary_builder.get_or_intern_canon(s);
Some(Song {
real_path,
virtual_path,
track_number: song.track_number,
disc_number: song.disc_number,
title: song.title.as_ref().and_then(&mut canonicalize),
artists: song
.artists
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
album_artists: song
.album_artists
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
year: song.year,
album: song.album.as_ref().and_then(&mut canonicalize),
artwork: artwork,
duration: song.duration,
lyricists: song
.lyricists
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
composers: song
.composers
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
genres: song.genres.iter().filter_map(&mut canonicalize).collect(),
labels: song.labels.iter().filter_map(&mut canonicalize).collect(),
date_added: song.date_added,
})
}
/// Resolves an interned storage song back into the fully owned
/// representation (`super::Song`), turning every `Spur`/key back into a
/// real string or path via the dictionary.
pub fn fetch_song(dictionary: &Dictionary, song: &Song) -> super::Song {
	super::Song {
		real_path: PathBuf::from(dictionary.resolve(&song.real_path.0)),
		virtual_path: PathBuf::from(dictionary.resolve(&song.virtual_path.0)),
		track_number: song.track_number,
		disc_number: song.disc_number,
		title: song.title.map(|s| dictionary.resolve(&s).to_string()),
		artists: song
			.artists
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		album_artists: song
			.album_artists
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		year: song.year,
		album: song.album.map(|s| dictionary.resolve(&s).to_string()),
		artwork: song
			.artwork
			.map(|a| PathBuf::from(dictionary.resolve(&a.0))),
		duration: song.duration,
		lyricists: song
			.lyricists
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		composers: song
			.composers
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		genres: song
			.genres
			.iter()
			.map(|s| dictionary.resolve(&s).to_string())
			.collect(),
		labels: song
			.labels
			.iter()
			.map(|s| dictionary.resolve(&s).to_string())
			.collect(),
		date_added: song.date_added,
	}
}
/// Interns filesystem paths as `PathKey`s. Paths that are not valid UTF-8
/// cannot be interned; both methods return `None` in that case.
pub trait InternPath {
	// Interns the path, adding it to the dictionary if needed.
	fn get_or_intern(self, dictionary: &mut dictionary::Builder) -> Option<PathKey>;
	// Looks up the path in an already-built dictionary without inserting.
	fn get(self, dictionary: &Dictionary) -> Option<PathKey>;
}
impl<P: AsRef<Path>> InternPath for P {
fn get_or_intern(self, dictionary: &mut dictionary::Builder) -> Option<PathKey> {
let id = self
.as_ref()
.as_os_str()
.to_str()
.map(|s| dictionary.get_or_intern(s))
.map(PathKey);
if id.is_none() {
error!("Unsupported path: `{}`", self.as_ref().to_string_lossy());
}
id
}
fn get(self, dictionary: &Dictionary) -> Option<PathKey> {
let id = self
.as_ref()
.as_os_str()
.to_str()
.and_then(|s| dictionary.get(s))
.map(PathKey);
if id.is_none() {
error!("Unsupported path: `{}`", self.as_ref().to_string_lossy());
}
id
}
}

View file

@ -1,233 +0,0 @@
use diesel::prelude::*;
use std::default::Default;
use std::path::{Path, PathBuf};
use super::*;
use crate::app::test;
use crate::db::{directories, songs};
use crate::test_name;
// Name of the single mount point used by every test below.
const TEST_MOUNT_NAME: &str = "root";

// Indexing twice must be idempotent and conflict-free.
#[test]
fn update_adds_new_content() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();

	ctx.index.update().unwrap();
	ctx.index.update().unwrap(); // Validates that subsequent updates don't run into conflicts

	let mut connection = ctx.db.connect().unwrap();
	let all_directories: Vec<Directory> = directories::table.load(&mut connection).unwrap();
	let all_songs: Vec<Song> = songs::table.load(&mut connection).unwrap();
	assert_eq!(all_directories.len(), 6);
	assert_eq!(all_songs.len(), 13);
}

// Deleting files on disk must shrink the index on the next update.
#[test]
fn update_removes_missing_content() {
	let builder = test::ContextBuilder::new(test_name!());

	// Work on a private copy of the collection so we can delete from it.
	let original_collection_dir: PathBuf = ["test-data", "small-collection"].iter().collect();
	let test_collection_dir: PathBuf = builder.test_directory.join("small-collection");
	let copy_options = fs_extra::dir::CopyOptions::new();
	fs_extra::dir::copy(
		original_collection_dir,
		&builder.test_directory,
		&copy_options,
	)
	.unwrap();

	let ctx = builder
		.mount(TEST_MOUNT_NAME, test_collection_dir.to_str().unwrap())
		.build();

	ctx.index.update().unwrap();

	{
		let mut connection = ctx.db.connect().unwrap();
		let all_directories: Vec<Directory> = directories::table.load(&mut connection).unwrap();
		let all_songs: Vec<Song> = songs::table.load(&mut connection).unwrap();
		assert_eq!(all_directories.len(), 6);
		assert_eq!(all_songs.len(), 13);
	}

	let khemmis_directory = test_collection_dir.join("Khemmis");
	std::fs::remove_dir_all(khemmis_directory).unwrap();
	ctx.index.update().unwrap();

	{
		let mut connection = ctx.db.connect().unwrap();
		let all_directories: Vec<Directory> = directories::table.load(&mut connection).unwrap();
		let all_songs: Vec<Song> = songs::table.load(&mut connection).unwrap();
		assert_eq!(all_directories.len(), 4);
		assert_eq!(all_songs.len(), 8);
	}
}

// The virtual root contains exactly the mount point directory.
#[test]
fn can_browse_top_level() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();
	ctx.index.update().unwrap();

	let root_path = Path::new(TEST_MOUNT_NAME);
	let files = ctx.index.browse(Path::new("")).unwrap();
	assert_eq!(files.len(), 1);
	match files[0] {
		CollectionFile::Directory(ref d) => assert_eq!(d.path, root_path.to_str().unwrap()),
		_ => panic!("Expected directory"),
	}
}

// Browsing inside the mount lists its two artist directories.
#[test]
fn can_browse_directory() {
	let khemmis_path: PathBuf = [TEST_MOUNT_NAME, "Khemmis"].iter().collect();
	let tobokegao_path: PathBuf = [TEST_MOUNT_NAME, "Tobokegao"].iter().collect();

	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();
	ctx.index.update().unwrap();

	let files = ctx.index.browse(Path::new(TEST_MOUNT_NAME)).unwrap();

	assert_eq!(files.len(), 2);
	match files[0] {
		CollectionFile::Directory(ref d) => assert_eq!(d.path, khemmis_path.to_str().unwrap()),
		_ => panic!("Expected directory"),
	}

	match files[1] {
		CollectionFile::Directory(ref d) => assert_eq!(d.path, tobokegao_path.to_str().unwrap()),
		_ => panic!("Expected directory"),
	}
}

// Flattening the root returns every song, sorted.
#[test]
fn can_flatten_root() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();
	ctx.index.update().unwrap();
	let songs = ctx.index.flatten(Path::new(TEST_MOUNT_NAME)).unwrap();
	assert_eq!(songs.len(), 13);
	assert_eq!(songs[0].title, Some("Above The Water".to_owned()));
}

// Flattening a sub-directory returns only its songs.
#[test]
fn can_flatten_directory() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();
	ctx.index.update().unwrap();
	let path: PathBuf = [TEST_MOUNT_NAME, "Tobokegao"].iter().collect();
	let songs = ctx.index.flatten(path).unwrap();
	assert_eq!(songs.len(), 8);
}

// Flatten must not treat path prefixes as directory matches.
#[test]
fn can_flatten_directory_with_shared_prefix() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();
	ctx.index.update().unwrap();
	let path: PathBuf = [TEST_MOUNT_NAME, "Tobokegao", "Picnic"].iter().collect(); // Prefix of '(Picnic Remixes)'
	let songs = ctx.index.flatten(path).unwrap();
	assert_eq!(songs.len(), 7);
}

#[test]
fn can_get_random_albums() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();
	ctx.index.update().unwrap();
	let albums = ctx.index.get_random_albums(1).unwrap();
	assert_eq!(albums.len(), 1);
}

// Recent albums come back in descending date_added order.
#[test]
fn can_get_recent_albums() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();
	ctx.index.update().unwrap();
	let albums = ctx.index.get_recent_albums(2).unwrap();
	assert_eq!(albums.len(), 2);
	assert!(albums[0].date_added >= albums[1].date_added);
}

// Verifies every tag read for a single known song, including non-ASCII
// titles and sibling-file artwork resolution.
#[test]
fn can_get_a_song() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();

	ctx.index.update().unwrap();

	let picnic_virtual_dir: PathBuf = [TEST_MOUNT_NAME, "Tobokegao", "Picnic"].iter().collect();
	let song_virtual_path = picnic_virtual_dir.join("05 - シャーベット (Sherbet).mp3");
	let artwork_virtual_path = picnic_virtual_dir.join("Folder.png");

	let song = ctx.index.get_song(&song_virtual_path).unwrap();
	assert_eq!(song.path, song_virtual_path.to_string_lossy().as_ref());
	assert_eq!(song.track_number, Some(5));
	assert_eq!(song.disc_number, None);
	assert_eq!(song.title, Some("シャーベット (Sherbet)".to_owned()));
	assert_eq!(song.artist, Some("Tobokegao".to_owned()));
	assert_eq!(song.album_artist, None);
	assert_eq!(song.album, Some("Picnic".to_owned()));
	assert_eq!(song.year, Some(2016));
	assert_eq!(
		song.artwork,
		Some(artwork_virtual_path.to_string_lossy().into_owned())
	);
}

// A song with embedded artwork should use its own path as artwork source.
#[test]
fn indexes_embedded_artwork() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();

	ctx.index.update().unwrap();

	let picnic_virtual_dir: PathBuf = [TEST_MOUNT_NAME, "Tobokegao", "Picnic"].iter().collect();
	let song_virtual_path = picnic_virtual_dir.join("07 - なぜ (Why).mp3");

	let song = ctx.index.get_song(&song_virtual_path).unwrap();
	assert_eq!(
		song.artwork,
		Some(song_virtual_path.to_string_lossy().into_owned())
	);
}

// The artwork filename pattern must match regardless of case.
#[test]
fn album_art_pattern_is_case_insensitive() {
	let ctx = test::ContextBuilder::new(test_name!())
		.mount(TEST_MOUNT_NAME, "test-data/small-collection")
		.build();

	let patterns = vec!["folder", "FOLDER"];

	for pattern in patterns.into_iter() {
		ctx.settings_manager
			.amend(&settings::NewSettings {
				album_art_pattern: Some(pattern.to_owned()),
				..Default::default()
			})
			.unwrap();
		ctx.index.update().unwrap();

		let hunted_virtual_dir: PathBuf = [TEST_MOUNT_NAME, "Khemmis", "Hunted"].iter().collect();
		let artwork_virtual_path = hunted_virtual_dir.join("Folder.jpg");
		let song = &ctx.index.flatten(&hunted_virtual_dir).unwrap()[0];
		assert_eq!(
			song.artwork,
			Some(artwork_virtual_path.to_string_lossy().into_owned())
		);
	}
}

View file

@ -1,80 +0,0 @@
use serde::{Deserialize, Serialize};
use std::path::Path;
use crate::app::vfs::VFS;
use crate::db::songs;
/// A file visible when browsing the collection: either a directory or a song.
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum CollectionFile {
	Directory(Directory),
	Song(Song),
}

/// A song row from the `songs` table. Path fields hold real filesystem
/// paths until `virtualize` rewrites them into the virtual filesystem.
#[derive(Debug, PartialEq, Eq, Queryable, QueryableByName, Serialize, Deserialize)]
#[diesel(table_name = songs)]
pub struct Song {
	#[serde(skip_serializing, skip_deserializing)]
	id: i32,
	pub path: String,
	#[serde(skip_serializing, skip_deserializing)]
	pub parent: String,
	pub track_number: Option<i32>,
	pub disc_number: Option<i32>,
	pub title: Option<String>,
	pub artist: Option<String>,
	pub album_artist: Option<String>,
	pub year: Option<i32>,
	pub album: Option<String>,
	pub artwork: Option<String>,
	pub duration: Option<i32>,
	pub lyricist: Option<String>,
	pub composer: Option<String>,
	pub genre: Option<String>,
	pub label: Option<String>,
}

impl Song {
	/// Rewrites `path` (and `artwork`, best effort) from real to virtual
	/// paths. Returns `None` when the song path maps to no mount point.
	pub fn virtualize(mut self, vfs: &VFS) -> Option<Song> {
		self.path = match vfs.real_to_virtual(Path::new(&self.path)) {
			Ok(p) => p.to_string_lossy().into_owned(),
			_ => return None,
		};
		if let Some(artwork_path) = self.artwork {
			// Unmappable artwork is dropped rather than failing the song.
			self.artwork = match vfs.real_to_virtual(Path::new(&artwork_path)) {
				Ok(p) => Some(p.to_string_lossy().into_owned()),
				_ => None,
			};
		}
		Some(self)
	}
}

/// A directory record loaded via Diesel, carrying the album metadata
/// aggregated for that directory.
#[derive(Debug, PartialEq, Eq, Queryable, Serialize, Deserialize)]
pub struct Directory {
	#[serde(skip_serializing, skip_deserializing)]
	id: i32,
	pub path: String,
	#[serde(skip_serializing, skip_deserializing)]
	pub parent: Option<String>,
	pub artist: Option<String>,
	pub year: Option<i32>,
	pub album: Option<String>,
	pub artwork: Option<String>,
	pub date_added: i32,
}

impl Directory {
	/// Same real-to-virtual path rewriting as `Song::virtualize`.
	pub fn virtualize(mut self, vfs: &VFS) -> Option<Directory> {
		self.path = match vfs.real_to_virtual(Path::new(&self.path)) {
			Ok(p) => p.to_string_lossy().into_owned(),
			_ => return None,
		};
		if let Some(artwork_path) = self.artwork {
			self.artwork = match vfs.real_to_virtual(Path::new(&artwork_path)) {
				Ok(p) => Some(p.to_string_lossy().into_owned()),
				_ => None,
			};
		}
		Some(self)
	}
}

View file

@ -1,79 +0,0 @@
use log::{error, info};
use std::time;
mod cleaner;
mod collector;
mod inserter;
mod traverser;
use crate::app::index::Index;
use crate::app::vfs;
use crate::db;
use cleaner::Cleaner;
use collector::Collector;
use inserter::Inserter;
use traverser::Traverser;
/// Errors surfaced by a library index update.
#[derive(thiserror::Error, Debug)]
pub enum Error {
	#[error(transparent)]
	IndexClean(#[from] cleaner::Error),
	#[error(transparent)]
	Database(#[from] diesel::result::Error),
	#[error(transparent)]
	DatabaseConnection(#[from] db::Error),
	#[error(transparent)]
	Vfs(#[from] vfs::Error),
}

impl Index {
	/// Rebuilds the library index: first removes dead entries, then runs a
	/// three-thread pipeline (traverser -> collector -> inserter) connected
	/// by unbounded channels.
	pub fn update(&self) -> Result<(), Error> {
		let start = time::Instant::now();
		info!("Beginning library index update");

		let album_art_pattern = self.settings_manager.get_index_album_art_pattern().ok();

		// Purge index entries whose backing files no longer exist.
		let cleaner = Cleaner::new(self.db.clone(), self.vfs_manager.clone());
		cleaner.clean()?;

		// Sink stage: writes collected songs/directories to the database.
		let (insert_sender, insert_receiver) = crossbeam_channel::unbounded();
		let inserter_db = self.db.clone();
		let insertion_thread = std::thread::spawn(move || {
			let mut inserter = Inserter::new(inserter_db, insert_receiver);
			inserter.insert();
		});

		// Middle stage: aggregates per-directory metadata from traversed files.
		let (collect_sender, collect_receiver) = crossbeam_channel::unbounded();
		let collector_thread = std::thread::spawn(move || {
			let collector = Collector::new(collect_receiver, insert_sender, album_art_pattern);
			collector.collect();
		});

		// Source stage: walks the filesystem under every mount point.
		let vfs = self.vfs_manager.get_vfs()?;
		let traverser_thread = std::thread::spawn(move || {
			let mounts = vfs.mounts();
			let traverser = Traverser::new(collect_sender);
			traverser.traverse(mounts.iter().map(|p| p.source.clone()).collect());
		});

		// Join in pipeline order: each thread's channel sender is dropped
		// when it exits, letting the downstream stage drain and terminate.
		if let Err(e) = traverser_thread.join() {
			error!("Error joining on traverser thread: {:?}", e);
		}

		if let Err(e) = collector_thread.join() {
			error!("Error joining on collector thread: {:?}", e);
		}

		if let Err(e) = insertion_thread.join() {
			error!("Error joining on inserter thread: {:?}", e);
		}

		info!(
			"Library index update took {} seconds",
			start.elapsed().as_millis() as f32 / 1000.0
		);

		Ok(())
	}
}

View file

@ -1,85 +0,0 @@
use diesel::prelude::*;
use rayon::prelude::*;
use std::path::Path;
use crate::app::vfs;
use crate::db::{self, directories, songs, DB};
const INDEX_BUILDING_CLEAN_BUFFER_SIZE: usize = 500; // Deletions in each transaction

/// Errors surfaced while cleaning the index.
#[derive(thiserror::Error, Debug)]
pub enum Error {
	#[error(transparent)]
	Database(#[from] diesel::result::Error),
	#[error(transparent)]
	DatabaseConnection(#[from] db::Error),
	#[error(transparent)]
	ThreadPoolBuilder(#[from] rayon::ThreadPoolBuildError),
	#[error(transparent)]
	Vfs(#[from] vfs::Error),
}

/// Removes index rows whose backing files are gone from disk or are no
/// longer reachable through any mount point.
pub struct Cleaner {
	db: DB,
	vfs_manager: vfs::Manager,
}

impl Cleaner {
	pub fn new(db: DB, vfs_manager: vfs::Manager) -> Self {
		Self { db, vfs_manager }
	}

	/// Checks all indexed paths in parallel and deletes the missing ones in
	/// chunks, to keep individual transactions bounded.
	pub fn clean(&self) -> Result<(), Error> {
		let vfs = self.vfs_manager.get_vfs()?;

		let all_directories: Vec<String> = {
			let mut connection = self.db.connect()?;
			directories::table
				.select(directories::path)
				.load(&mut connection)?
		};

		let all_songs: Vec<String> = {
			let mut connection = self.db.connect()?;
			songs::table.select(songs::path).load(&mut connection)?
		};

		// A path is stale when it no longer exists or falls outside every mount.
		let list_missing_directories = || {
			all_directories
				.par_iter()
				.filter(|ref directory_path| {
					let path = Path::new(&directory_path);
					!path.exists() || vfs.real_to_virtual(path).is_err()
				})
				.collect::<Vec<_>>()
		};

		let list_missing_songs = || {
			all_songs
				.par_iter()
				.filter(|ref song_path| {
					let path = Path::new(&song_path);
					!path.exists() || vfs.real_to_virtual(path).is_err()
				})
				.collect::<Vec<_>>()
		};

		// Run both scans concurrently on a dedicated rayon pool.
		let thread_pool = rayon::ThreadPoolBuilder::new().build()?;
		let (missing_directories, missing_songs) =
			thread_pool.join(list_missing_directories, list_missing_songs);

		{
			let mut connection = self.db.connect()?;
			for chunk in missing_directories[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
				diesel::delete(directories::table.filter(directories::path.eq_any(chunk)))
					.execute(&mut connection)?;
			}

			for chunk in missing_songs[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
				diesel::delete(songs::table.filter(songs::path.eq_any(chunk)))
					.execute(&mut connection)?;
			}
		}

		Ok(())
	}
}

View file

@ -1,149 +0,0 @@
use crossbeam_channel::{Receiver, Sender};
use log::error;
use regex::Regex;
use super::*;
/// Middle stage of the indexing pipeline: receives traversed directories,
/// derives directory-level metadata from their songs, and forwards both
/// songs and directories to the inserter.
pub struct Collector {
	receiver: Receiver<traverser::Directory>,
	sender: Sender<inserter::Item>,
	album_art_pattern: Option<Regex>,
}

impl Collector {
	pub fn new(
		receiver: Receiver<traverser::Directory>,
		sender: Sender<inserter::Item>,
		album_art_pattern: Option<Regex>,
	) -> Self {
		Self {
			receiver,
			sender,
			album_art_pattern,
		}
	}

	/// Drains the receiver until the traverser drops its sender.
	pub fn collect(&self) {
		while let Ok(directory) = self.receiver.recv() {
			self.collect_directory(directory);
		}
	}

	/// Emits one inserter item per song, then one for the directory itself.
	///
	/// The directory's album/artist/year are taken from its songs; a field
	/// whose value disagrees between songs is flagged inconsistent and
	/// dropped from the directory record.
	fn collect_directory(&self, directory: traverser::Directory) {
		let mut directory_album = None;
		let mut directory_year = None;
		let mut directory_artist = None;
		let mut inconsistent_directory_album = false;
		let mut inconsistent_directory_year = false;
		let mut inconsistent_directory_artist = false;

		let directory_artwork = self.get_artwork(&directory);
		let directory_path_string = directory.path.to_string_lossy().to_string();
		let directory_parent_string = directory.parent.map(|p| p.to_string_lossy().to_string());

		for song in directory.songs {
			let tags = song.metadata;
			let path_string = song.path.to_string_lossy().to_string();

			if tags.year.is_some() {
				inconsistent_directory_year |=
					directory_year.is_some() && directory_year != tags.year;
				directory_year = tags.year;
			}

			if tags.album.is_some() {
				inconsistent_directory_album |=
					directory_album.is_some() && directory_album != tags.album;
				directory_album = tags.album.as_ref().cloned();
			}

			// Album artist wins over track artist for the directory record.
			if tags.album_artist.is_some() {
				inconsistent_directory_artist |=
					directory_artist.is_some() && directory_artist != tags.album_artist;
				directory_artist = tags.album_artist.as_ref().cloned();
			} else if tags.artist.is_some() {
				inconsistent_directory_artist |=
					directory_artist.is_some() && directory_artist != tags.artist;
				directory_artist = tags.artist.as_ref().cloned();
			}

			// Embedded artwork beats the directory-level artwork file.
			let artwork_path = if tags.has_artwork {
				Some(path_string.clone())
			} else {
				directory_artwork.as_ref().cloned()
			};

			if let Err(e) = self.sender.send(inserter::Item::Song(inserter::Song {
				path: path_string,
				parent: directory_path_string.clone(),
				disc_number: tags.disc_number.map(|n| n as i32),
				track_number: tags.track_number.map(|n| n as i32),
				title: tags.title,
				duration: tags.duration.map(|n| n as i32),
				artist: tags.artist,
				album_artist: tags.album_artist,
				album: tags.album,
				year: tags.year,
				artwork: artwork_path,
				lyricist: tags.lyricist,
				composer: tags.composer,
				genre: tags.genre,
				label: tags.label,
			})) {
				error!("Error while sending song from collector: {}", e);
			}
		}

		if inconsistent_directory_year {
			directory_year = None;
		}

		if inconsistent_directory_album {
			directory_album = None;
		}

		if inconsistent_directory_artist {
			directory_artist = None;
		}

		if let Err(e) = self
			.sender
			.send(inserter::Item::Directory(inserter::Directory {
				path: directory_path_string,
				parent: directory_parent_string,
				artwork: directory_artwork,
				album: directory_album,
				artist: directory_artist,
				year: directory_year,
				date_added: directory.created,
			})) {
			error!("Error while sending directory from collector: {}", e);
		}
	}

	/// Picks artwork for a directory: a sibling file matching the album-art
	/// pattern wins, falling back to any song with embedded artwork.
	fn get_artwork(&self, directory: &traverser::Directory) -> Option<String> {
		let regex_artwork = directory.other_files.iter().find_map(|path| {
			let matches = path
				.file_name()
				.and_then(|name| name.to_str())
				.map(|name| match &self.album_art_pattern {
					Some(pattern) => pattern.is_match(name),
					None => false,
				})
				.unwrap_or(false);
			if matches {
				Some(path.to_string_lossy().to_string())
			} else {
				None
			}
		});

		let embedded_artwork = directory.songs.iter().find_map(|song| {
			if song.metadata.has_artwork {
				Some(song.path.to_string_lossy().to_string())
			} else {
				None
			}
		});

		regex_artwork.or(embedded_artwork)
	}
}

View file

@ -1,124 +0,0 @@
use crossbeam_channel::Receiver;
use diesel::prelude::*;
use log::error;
use crate::db::{directories, songs, DB};
const INDEX_BUILDING_INSERT_BUFFER_SIZE: usize = 1000; // Insertions in each transaction
/// New row for the `songs` table.
#[derive(Debug, Insertable)]
#[diesel(table_name = songs)]
pub struct Song {
	pub path: String,
	// Path of the directory containing this song.
	pub parent: String,
	pub track_number: Option<i32>,
	pub disc_number: Option<i32>,
	pub title: Option<String>,
	pub artist: Option<String>,
	pub album_artist: Option<String>,
	pub year: Option<i32>,
	pub album: Option<String>,
	// Path of the file holding this song's artwork (may be the song itself).
	pub artwork: Option<String>,
	pub duration: Option<i32>,
	pub lyricist: Option<String>,
	pub composer: Option<String>,
	pub genre: Option<String>,
	pub label: Option<String>,
}
/// New row for the `directories` table.
#[derive(Debug, Insertable)]
#[diesel(table_name = directories)]
pub struct Directory {
	pub path: String,
	// `None` for collection roots.
	pub parent: Option<String>,
	pub artist: Option<String>,
	pub year: Option<i32>,
	pub album: Option<String>,
	pub artwork: Option<String>,
	// Seconds since the Unix epoch.
	pub date_added: i32,
}
/// Unit of work sent from the collector to the inserter.
pub enum Item {
	Directory(Directory),
	Song(Song),
}
/// Receives collected items and writes them to the database in batches.
pub struct Inserter {
	receiver: Receiver<Item>,
	// Buffers, flushed when they reach INDEX_BUILDING_INSERT_BUFFER_SIZE.
	new_directories: Vec<Directory>,
	new_songs: Vec<Song>,
	db: DB,
}
impl Inserter {
pub fn new(db: DB, receiver: Receiver<Item>) -> Self {
let new_directories = Vec::with_capacity(INDEX_BUILDING_INSERT_BUFFER_SIZE);
let new_songs = Vec::with_capacity(INDEX_BUILDING_INSERT_BUFFER_SIZE);
Self {
receiver,
new_directories,
new_songs,
db,
}
}
pub fn insert(&mut self) {
while let Ok(item) = self.receiver.recv() {
self.insert_item(item);
}
}
fn insert_item(&mut self, insert: Item) {
match insert {
Item::Directory(d) => {
self.new_directories.push(d);
if self.new_directories.len() >= INDEX_BUILDING_INSERT_BUFFER_SIZE {
self.flush_directories();
}
}
Item::Song(s) => {
self.new_songs.push(s);
if self.new_songs.len() >= INDEX_BUILDING_INSERT_BUFFER_SIZE {
self.flush_songs();
}
}
};
}
fn flush_directories(&mut self) {
let res = self.db.connect().ok().and_then(|mut connection| {
diesel::insert_into(directories::table)
.values(&self.new_directories)
.execute(&mut *connection) // TODO https://github.com/diesel-rs/diesel/issues/1822
.ok()
});
if res.is_none() {
error!("Could not insert new directories in database");
}
self.new_directories.clear();
}
fn flush_songs(&mut self) {
let res = self.db.connect().ok().and_then(|mut connection| {
diesel::insert_into(songs::table)
.values(&self.new_songs)
.execute(&mut *connection) // TODO https://github.com/diesel-rs/diesel/issues/1822
.ok()
});
if res.is_none() {
error!("Could not insert new songs in database");
}
self.new_songs.clear();
}
}
// Flush any partially filled buffers so the last (< buffer size) batch of
// items is not lost when the inserter goes out of scope.
impl Drop for Inserter {
	fn drop(&mut self) {
		if !self.new_directories.is_empty() {
			self.flush_directories();
		}
		if !self.new_songs.is_empty() {
			self.flush_songs();
		}
	}
}

View file

@ -1,202 +0,0 @@
use crossbeam_channel::{self, Receiver, Sender};
use log::{error, info};
use std::cmp::min;
use std::fs;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use crate::app::index::metadata::{self, SongTags};
/// An audio file and its parsed tags.
#[derive(Debug)]
pub struct Song {
	pub path: PathBuf,
	pub metadata: SongTags,
}
/// Contents of a single directory encountered during traversal.
#[derive(Debug)]
pub struct Directory {
	// `None` for collection roots.
	pub parent: Option<PathBuf>,
	pub path: PathBuf,
	// Files with readable audio metadata.
	pub songs: Vec<Song>,
	// Every other non-directory file (e.g. artwork candidates).
	pub other_files: Vec<PathBuf>,
	// Creation (or modification) time, in seconds since the Unix epoch.
	pub created: i32,
}
/// Walks collection directories on worker threads and emits their contents on
/// a channel.
pub struct Traverser {
	directory_sender: Sender<Directory>,
}
/// A directory waiting to be read, along with the directory that referenced it.
#[derive(Debug)]
struct WorkItem {
	parent: Option<PathBuf>,
	path: PathBuf,
}
impl Traverser {
	/// Creates a traverser that reports each visited directory on `directory_sender`.
	pub fn new(directory_sender: Sender<Directory>) -> Self {
		Self { directory_sender }
	}

	/// Recursively walks all of `roots` using a pool of worker threads, and
	/// blocks until the entire traversal is complete.
	pub fn traverse(&self, roots: Vec<PathBuf>) {
		// Counts directories queued or in flight; workers exit when it reaches
		// zero. Seeded with the roots, which are queued below.
		let num_pending_work_items = Arc::new(AtomicUsize::new(roots.len()));
		let (work_item_sender, work_item_receiver) = crossbeam_channel::unbounded();

		// Thread count is overridable via environment variable; defaults to the
		// number of CPUs, capped at 4.
		let key = "POLARIS_NUM_TRAVERSER_THREADS";
		let num_threads = std::env::var_os(key)
			.map(|v| v.to_string_lossy().to_string())
			.and_then(|v| usize::from_str(&v).ok())
			.unwrap_or_else(|| min(num_cpus::get(), 4));
		info!("Browsing collection using {} threads", num_threads);

		let mut threads = Vec::new();
		for _ in 0..num_threads {
			let work_item_sender = work_item_sender.clone();
			let work_item_receiver = work_item_receiver.clone();
			let directory_sender = self.directory_sender.clone();
			let num_pending_work_items = num_pending_work_items.clone();
			threads.push(thread::spawn(move || {
				let worker = Worker {
					work_item_sender,
					work_item_receiver,
					directory_sender,
					num_pending_work_items,
				};
				worker.run();
			}));
		}

		// Seed the queue with the root directories (already accounted for in
		// `num_pending_work_items` above).
		for root in roots {
			let work_item = WorkItem {
				parent: None,
				path: root,
			};
			if let Err(e) = work_item_sender.send(work_item) {
				error!("Error initializing traverser: {:#?}", e);
			}
		}

		for thread in threads {
			if let Err(e) = thread.join() {
				error!("Error joining on traverser worker thread: {:#?}", e);
			}
		}
	}
}
/// One traversal thread. Workers both consume and produce work items, and
/// share a pending-work counter used to detect completion.
struct Worker {
	work_item_sender: Sender<WorkItem>,
	work_item_receiver: Receiver<WorkItem>,
	directory_sender: Sender<Directory>,
	num_pending_work_items: Arc<AtomicUsize>,
}
impl Worker {
	/// Processes work items until the traversal is complete.
	fn run(&self) {
		while let Some(work_item) = self.find_work_item() {
			self.process_work_item(work_item);
			self.on_item_processed();
		}
	}

	/// Blocks until a work item is available, or returns `None` once all
	/// pending work (across every worker) is done.
	fn find_work_item(&self) -> Option<WorkItem> {
		loop {
			if self.is_all_work_done() {
				return None;
			}
			// Poll with a timeout so we periodically re-check the termination
			// condition while other workers may still be queueing work.
			if let Ok(w) = self
				.work_item_receiver
				.recv_timeout(Duration::from_millis(100))
			{
				return Some(w);
			}
		}
	}

	fn is_all_work_done(&self) -> bool {
		self.num_pending_work_items.load(Ordering::SeqCst) == 0
	}

	/// Enqueues a directory for processing. The pending counter is incremented
	/// before sending, so it never drops to zero while work remains queued.
	fn queue_work(&self, work_item: WorkItem) {
		self.num_pending_work_items.fetch_add(1, Ordering::SeqCst);
		self.work_item_sender.send(work_item).unwrap();
	}

	fn on_item_processed(&self) {
		self.num_pending_work_items.fetch_sub(1, Ordering::SeqCst);
	}

	fn emit_directory(&self, directory: Directory) {
		self.directory_sender.send(directory).unwrap();
	}

	/// Reads one directory: partitions its entries into sub-directories, songs
	/// (files with readable audio metadata) and other files, emits the
	/// directory, then queues the sub-directories for later processing.
	pub fn process_work_item(&self, work_item: WorkItem) {
		let read_dir = match fs::read_dir(&work_item.path) {
			Ok(read_dir) => read_dir,
			Err(e) => {
				error!(
					"Directory read error for `{}`: {}",
					work_item.path.display(),
					e
				);
				return;
			}
		};
		let mut sub_directories = Vec::new();
		let mut songs = Vec::new();
		let mut other_files = Vec::new();
		for entry in read_dir {
			let path = match entry {
				Ok(ref f) => f.path(),
				Err(e) => {
					// Give up on the remaining entries of this directory.
					error!(
						"File read error within `{}`: {}",
						work_item.path.display(),
						e
					);
					break;
				}
			};
			if path.is_dir() {
				sub_directories.push(path);
			} else if let Some(metadata) = metadata::read(&path) {
				songs.push(Song { path, metadata });
			} else {
				other_files.push(path);
			}
		}
		let created = Self::get_date_created(&work_item.path).unwrap_or_default();
		self.emit_directory(Directory {
			path: work_item.path.to_owned(),
			parent: work_item.parent,
			songs,
			other_files,
			created,
		});
		for sub_directory in sub_directories.into_iter() {
			self.queue_work(WorkItem {
				parent: Some(work_item.path.clone()),
				path: sub_directory,
			});
		}
	}

	/// Returns the directory's creation time (falling back to its modification
	/// time) as seconds since the Unix epoch, or `None` if unavailable.
	fn get_date_created(path: &Path) -> Option<i32> {
		if let Ok(t) = fs::metadata(path).and_then(|m| m.created().or_else(|_| m.modified())) {
			t.duration_since(std::time::UNIX_EPOCH)
				.map(|d| d.as_secs() as i32)
				.ok()
		} else {
			None
		}
	}
}

View file

@ -1,92 +0,0 @@
use rustfm_scrobble::{Scrobble, Scrobbler};
use std::path::Path;
use user::AuthToken;
use crate::app::{
index::{Index, QueryError},
user,
};
// Credentials identifying the Polaris application to the last.fm API.
const LASTFM_API_KEY: &str = "02b96c939a2b451c31dfd67add1f696e";
const LASTFM_API_SECRET: &str = "0f25a80ceef4b470b5cb97d99d4b3420";
/// Errors that can occur while interacting with last.fm.
#[derive(thiserror::Error, Debug)]
pub enum Error {
	#[error("Failed to authenticate with last.fm")]
	ScrobblerAuthentication(rustfm_scrobble::ScrobblerError),
	#[error("Failed to emit last.fm scrobble")]
	Scrobble(rustfm_scrobble::ScrobblerError),
	#[error("Failed to emit last.fm now playing update")]
	NowPlaying(rustfm_scrobble::ScrobblerError),
	#[error(transparent)]
	Query(#[from] QueryError),
	#[error(transparent)]
	User(#[from] user::Error),
}
/// Links Polaris user accounts with last.fm and submits playback events
/// (scrobbles and "now playing" updates) on their behalf.
#[derive(Clone)]
pub struct Manager {
	// Used to look up song metadata for tracks being scrobbled.
	index: Index,
	// Stores per-user last.fm session keys.
	user_manager: user::Manager,
}
impl Manager {
pub fn new(index: Index, user_manager: user::Manager) -> Self {
Self {
index,
user_manager,
}
}
pub fn generate_link_token(&self, username: &str) -> Result<AuthToken, Error> {
self.user_manager
.generate_lastfm_link_token(username)
.map_err(|e| e.into())
}
pub fn link(&self, username: &str, lastfm_token: &str) -> Result<(), Error> {
let mut scrobbler = Scrobbler::new(LASTFM_API_KEY, LASTFM_API_SECRET);
let auth_response = scrobbler
.authenticate_with_token(lastfm_token)
.map_err(Error::ScrobblerAuthentication)?;
self.user_manager
.lastfm_link(username, &auth_response.name, &auth_response.key)
.map_err(|e| e.into())
}
pub fn unlink(&self, username: &str) -> Result<(), Error> {
self.user_manager
.lastfm_unlink(username)
.map_err(|e| e.into())
}
pub fn scrobble(&self, username: &str, track: &Path) -> Result<(), Error> {
let mut scrobbler = Scrobbler::new(LASTFM_API_KEY, LASTFM_API_SECRET);
let scrobble = self.scrobble_from_path(track)?;
let auth_token = self.user_manager.get_lastfm_session_key(username)?;
scrobbler.authenticate_with_session_key(&auth_token);
scrobbler.scrobble(&scrobble).map_err(Error::Scrobble)?;
Ok(())
}
pub fn now_playing(&self, username: &str, track: &Path) -> Result<(), Error> {
let mut scrobbler = Scrobbler::new(LASTFM_API_KEY, LASTFM_API_SECRET);
let scrobble = self.scrobble_from_path(track)?;
let auth_token = self.user_manager.get_lastfm_session_key(username)?;
scrobbler.authenticate_with_session_key(&auth_token);
scrobbler
.now_playing(&scrobble)
.map_err(Error::NowPlaying)?;
Ok(())
}
fn scrobble_from_path(&self, track: &Path) -> Result<Scrobble, Error> {
let song = self.index.get_song(track)?;
Ok(Scrobble::new(
song.artist.as_deref().unwrap_or(""),
song.title.as_deref().unwrap_or(""),
song.album.as_deref().unwrap_or(""),
))
}
}

305
src/app/legacy.rs Normal file
View file

@ -0,0 +1,305 @@
use std::{
collections::HashMap,
ops::Deref,
path::{Path, PathBuf},
str::FromStr,
};
use regex::Regex;
use rusqlite::Connection;
use crate::app::{config, index, scanner, Error};
/// Extracts the 32-byte authentication secret stored in a legacy Polaris
/// SQLite database.
pub fn read_legacy_auth_secret(db_file_path: &PathBuf) -> Result<[u8; 32], Error> {
	let connection = Connection::open(db_file_path)?;
	let query = "SELECT auth_secret FROM misc_settings";
	let secret: [u8; 32] = connection.query_row(query, [], |row| row.get(0))?;
	Ok(secret)
}
/// Reads collection settings, mount points and user accounts from a legacy
/// Polaris SQLite database, converted to the current configuration format.
pub fn read_legacy_config(
	db_file_path: &PathBuf,
) -> Result<Option<config::storage::Config>, Error> {
	let connection = Connection::open(db_file_path)?;
	let query = "SELECT index_album_art_pattern FROM misc_settings";
	let album_art_pattern: String = connection.query_row(query, [], |row| row.get(0))?;

	let mount_dirs = read_mount_dirs(db_file_path)?;
	let users = read_users(db_file_path)?;

	let config = config::storage::Config {
		album_art_pattern: Some(album_art_pattern),
		mount_dirs,
		ddns_update_url: None,
		users: users.into_values().collect(),
	};

	Ok(Some(config))
}
/// Reads the mount points declared in a legacy Polaris SQLite database.
///
/// Fails with `Error::InvalidDirectory` if a stored source path cannot be
/// parsed as a filesystem path.
fn read_mount_dirs(db_file_path: &PathBuf) -> Result<Vec<config::storage::MountDir>, Error> {
	let connection = Connection::open(db_file_path)?;
	let mut mount_dirs_statement = connection.prepare("SELECT source, name FROM mount_points")?;
	let mount_dirs_rows = mount_dirs_statement.query_and_then::<_, Error, _, _>([], |row| {
		let source_string = row.get::<_, String>(0)?;
		let source = PathBuf::from_str(&source_string)
			.map_err(|_| Error::InvalidDirectory(source_string))?;
		Ok(config::storage::MountDir {
			source,
			name: row.get::<_, String>(1)?,
		})
	})?;
	// Collect the fallible rows directly, short-circuiting on the first error.
	mount_dirs_rows.collect()
}
fn read_users(db_file_path: &PathBuf) -> Result<HashMap<u32, config::storage::User>, Error> {
let connection = Connection::open(db_file_path)?;
let mut users_statement =
connection.prepare("SELECT id, name, password_hash, admin FROM users")?;
let users_rows = users_statement.query_map([], |row| {
Ok((
row.get(0)?,
config::storage::User {
name: row.get(1)?,
admin: row.get(3)?,
initial_password: None,
hashed_password: row.get(2)?,
},
))
})?;
let mut users = HashMap::new();
for users_row in users_rows {
let (id, user) = users_row?;
users.insert(id, user);
}
Ok(users)
}
/// Replaces both `/` and `\` separators in `source` with the separator of the
/// current platform. Legacy databases may contain paths recorded on a
/// different OS than the one performing the migration.
fn sanitize_path(source: &PathBuf) -> PathBuf {
	let path_string = source.to_string_lossy();
	// Substituting the separator characters directly avoids compiling (and
	// unwrapping) a regex on every call.
	let sanitized = path_string.replace(['\\', '/'], std::path::MAIN_SEPARATOR_STR);
	PathBuf::from(sanitized)
}
/// Converts an on-disk path from the legacy database into a virtual path by
/// matching it against the configured mount points.
///
/// Returns `Error::CouldNotMapToVirtualPath` when no mount point source is a
/// prefix of `real_path`.
fn virtualize_path(
	real_path: &PathBuf,
	// Slice instead of &Vec so callers are not tied to a specific container.
	mount_dirs: &[config::storage::MountDir],
) -> Result<PathBuf, Error> {
	let sanitized = sanitize_path(real_path); // Paths in test database use `/` separators, but need `\` when running tests on Windows
	for mount_dir in mount_dirs {
		if let Ok(tail) = sanitized.strip_prefix(&mount_dir.source) {
			return Ok(Path::new(&mount_dir.name).join(tail));
		}
	}
	Err(Error::CouldNotMapToVirtualPath(real_path.clone()))
}
/// Reads playlists from a legacy Polaris database and resolves their songs
/// against the current index.
///
/// Returns one `(playlist_name, username, songs)` tuple per playlist. Songs
/// whose paths cannot be mapped to a mount point, or which are missing from
/// the index, are silently skipped.
pub async fn read_legacy_playlists(
	db_file_path: &PathBuf,
	index_manager: index::Manager,
	scanner: scanner::Scanner,
) -> Result<Vec<(String, String, Vec<index::Song>)>, Error> {
	// Populate the index first so playlist songs can be resolved below.
	scanner.run_scan().await?;

	let users = read_users(db_file_path)?;
	let mount_dirs = read_mount_dirs(db_file_path)?;

	let connection = Connection::open(db_file_path)?;

	// Map each playlist id to its owner and name.
	let mut playlists_statement = connection.prepare("SELECT id, owner, name FROM playlists")?;
	let playlists_rows =
		playlists_statement.query_map([], |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)))?;
	let mut playlists = HashMap::new();
	for playlists_row in playlists_rows {
		let (id, owner, name): (u32, u32, String) = playlists_row?;
		playlists.insert(id, (users.get(&owner).ok_or(Error::UserNotFound)?, name));
	}

	let mut playlists_by_user: HashMap<String, HashMap<String, Vec<index::Song>>> = HashMap::new();

	// Songs come back in playlist order thanks to the ORDER BY clause.
	let mut songs_statement =
		connection.prepare("SELECT playlist, path FROM playlist_songs ORDER BY ordering")?;
	let mut songs_rows = songs_statement.query([])?;
	while let Some(row) = songs_rows.next()? {
		let playlist = playlists.get(&row.get(0)?).ok_or(Error::PlaylistNotFound)?;
		let user = playlist.0.name.clone();
		let name = playlist.1.clone();
		let real_path = PathBuf::from(row.get::<_, String>(1)?);
		let Ok(virtual_path) = virtualize_path(&real_path, &mount_dirs) else {
			continue;
		};
		// get_songs was called with exactly one path, so pop() cannot fail.
		let Ok(song) = index_manager
			.get_songs(vec![virtual_path])
			.await
			.pop()
			.unwrap()
		else {
			continue;
		};
		playlists_by_user
			.entry(user)
			.or_default()
			.entry(name)
			.or_default()
			.push(song);
	}

	// Flatten the nested per-user maps into result tuples.
	let mut results = vec![];
	for (user, playlists) in playlists_by_user {
		for (playlist_name, songs) in playlists {
			results.push((playlist_name.clone(), user.clone(), songs));
		}
	}

	Ok(results)
}
pub async fn delete_legacy_db(db_file_path: &PathBuf) -> Result<(), Error> {
tokio::fs::remove_file(db_file_path)
.await
.map_err(|e| Error::Io(db_file_path.clone(), e))?;
Ok(())
}
#[cfg(test)]
mod test {
	use std::path::PathBuf;

	use super::*;
	use crate::{
		app::{config, test},
		test_name,
	};

	#[test]
	fn can_read_auth_secret() {
		let secret =
			read_legacy_auth_secret(&PathBuf::from_iter(["test-data", "legacy_db_blank.sqlite"]))
				.unwrap();
		// Expected bytes mirror the auth_secret row stored in the fixture database.
		assert_eq!(
			secret,
			[
				0x8B as u8, 0x88, 0x50, 0x17, 0x20, 0x09, 0x7E, 0x60, 0x31, 0x80, 0xCE, 0xE3, 0xF0,
				0x5A, 0x00, 0xBC, 0x3A, 0xF4, 0xDC, 0xFD, 0x2E, 0xB7, 0x5D, 0x33, 0x5D, 0x81, 0x2F,
				0x9A, 0xB4, 0x3A, 0x27, 0x2D
			]
		);
	}

	// A blank legacy database yields a config with defaults and no users/mounts.
	#[test]
	fn can_read_blank_config() {
		let actual =
			read_legacy_config(&PathBuf::from_iter(["test-data", "legacy_db_blank.sqlite"]))
				.unwrap()
				.unwrap();
		let expected = config::storage::Config {
			album_art_pattern: Some("Folder.(jpeg|jpg|png)".to_owned()),
			mount_dirs: vec![],
			ddns_update_url: None,
			users: vec![],
		};
		assert_eq!(actual, expected);
	}

	// A populated legacy database yields its mount point and user account.
	#[test]
	fn can_read_populated_config() {
		let actual = read_legacy_config(&PathBuf::from_iter([
			"test-data",
			"legacy_db_populated.sqlite",
		]))
		.unwrap()
		.unwrap();
		let expected = config::storage::Config {
			album_art_pattern: Some("Folder.(jpeg|jpg|png)".to_owned()),
			mount_dirs: vec![config::storage::MountDir {
				source: PathBuf::from_iter(["test-data", "small-collection"]),
				name: "root".to_owned(),
			}],
			ddns_update_url: None,
			users: vec![config::storage::User {
				name: "example_user".to_owned(),
				admin: Some(true),
				initial_password: None,
				hashed_password: Some("$pbkdf2-sha256$i=10000,l=32$ADvDnwBv3kLUtjTJEwGcFA$oK43ICpNt2rbH21diMo6cSXL62qqLWOM7qs8f0s/9Oo".to_owned()),
			}],
		};
		assert_eq!(actual, expected);
	}

	#[tokio::test]
	async fn can_read_blank_playlists() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		let actual = read_legacy_playlists(
			&PathBuf::from_iter(["test-data", "legacy_db_blank.sqlite"]),
			ctx.index_manager,
			ctx.scanner,
		)
		.await
		.unwrap();
		let expected = vec![];
		assert_eq!(actual, expected);
	}

	#[tokio::test]
	async fn can_read_populated_playlists() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;

		// Apply the legacy mounts/users so playlist paths can be virtualized
		// and resolved against the test collection.
		let db_file_path = PathBuf::from_iter(["test-data", "legacy_db_populated.sqlite"]);
		let config = read_legacy_config(&db_file_path).unwrap().unwrap();
		ctx.config_manager.apply_config(config).await.unwrap();

		let actual = read_legacy_playlists(
			&db_file_path,
			ctx.index_manager.clone(),
			ctx.scanner.clone(),
		)
		.await
		.unwrap();

		#[rustfmt::skip]
		let song_paths = vec![
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "01 - Above The Water.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "02 - Candlelight.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "03 - Three Gates.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "04 - Beyond The Door.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "05 - Hunted.mp3"]),
		];

		let songs: Vec<index::Song> = ctx
			.index_manager
			.get_songs(song_paths)
			.await
			.into_iter()
			.map(|s| s.unwrap())
			.collect();

		let expected = vec![(
			"Example Playlist".to_owned(),
			"example_user".to_owned(),
			songs,
		)];

		assert_eq!(actual, expected);
	}
}

40
src/app/ndb.rs Normal file
View file

@ -0,0 +1,40 @@
use std::{
ops::Deref,
path::Path,
sync::{Arc, LazyLock},
};
use native_db::{Database, Models};
use crate::app::{playlist, Error};
// Registry of every data model stored in the native database, declared once so
// all database handles share the same schema definitions.
static MODELS: LazyLock<Models> = LazyLock::new(|| {
	let mut models = Models::new();
	models.define::<playlist::v1::PlaylistModel>().unwrap();
	models
});
/// Shared handle to the native (non-SQL) database. Cloning is cheap: all
/// clones point to the same underlying database.
#[derive(Clone)]
pub struct Manager {
	database: Arc<Database<'static>>,
}
impl Manager {
pub fn new(directory: &Path) -> Result<Self, Error> {
std::fs::create_dir_all(directory).map_err(|e| Error::Io(directory.to_owned(), e))?;
let path = directory.join("polaris.ndb");
let database = native_db::Builder::new()
.create(&MODELS, path)
.map_err(|e| Error::NativeDatabaseCreationError(e))?;
let database = Arc::new(database);
Ok(Self { database })
}
}
// Lets callers use a `Manager` anywhere a `Database` is expected.
impl Deref for Manager {
	type Target = Database<'static>;

	fn deref(&self) -> &Self::Target {
		self.database.as_ref()
	}
}

179
src/app/peaks.rs Normal file
View file

@ -0,0 +1,179 @@
use std::{
hash::{DefaultHasher, Hash, Hasher},
path::{Path, PathBuf},
};
use serde::{Deserialize, Serialize};
use symphonia::core::{
audio::SampleBuffer,
codecs::{DecoderOptions, CODEC_TYPE_NULL},
formats::FormatOptions,
io::{MediaSourceStream, MediaSourceStreamOptions},
meta::MetadataOptions,
probe::Hint,
};
use tokio::{io::AsyncWriteExt, task::spawn_blocking};
use crate::app::Error;
/// Waveform summary of an audio file. `interleaved` holds alternating
/// (min, max) sample values, one pair per fixed-size window of source audio.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Peaks {
	pub interleaved: Vec<u8>,
}
/// Computes waveform peaks for audio files and caches them on disk.
#[derive(Clone)]
pub struct Manager {
	// Directory where serialized peaks are cached.
	peaks_dir_path: PathBuf,
}
impl Manager {
	pub fn new(peaks_dir_path: PathBuf) -> Self {
		Self { peaks_dir_path }
	}

	/// Returns the peaks for `audio_path`, preferring the on-disk cache and
	/// falling back to decoding the source file.
	pub async fn get_peaks(&self, audio_path: &Path) -> Result<Peaks, Error> {
		match self.read_from_cache(audio_path).await {
			Ok(Some(peaks)) => Ok(peaks),
			// Cache miss, or unreadable cache entry: recompute from source.
			_ => self.read_from_source(audio_path).await,
		}
	}

	/// Maps an audio file path to its cache file location.
	fn get_peaks_path(&self, audio_path: &Path) -> PathBuf {
		let hash = Manager::hash(audio_path);
		let mut peaks_path = self.peaks_dir_path.clone();
		peaks_path.push(format!("{}.peaks", hash));
		peaks_path
	}

	/// Reads previously computed peaks from the cache directory.
	/// Returns `Ok(None)` when no cache entry exists for this file.
	async fn read_from_cache(&self, audio_path: &Path) -> Result<Option<Peaks>, Error> {
		let peaks_path = self.get_peaks_path(audio_path);
		if peaks_path.exists() {
			let serialized = tokio::fs::read(&peaks_path)
				.await
				.map_err(|e| Error::Io(peaks_path.clone(), e))?;
			let peaks =
				bitcode::deserialize::<Peaks>(&serialized).map_err(Error::PeaksDeserialization)?;
			Ok(Some(peaks))
		} else {
			Ok(None)
		}
	}

	/// Decodes the audio file, computes its peaks, and writes them to the
	/// cache before returning them.
	async fn read_from_source(&self, audio_path: &Path) -> Result<Peaks, Error> {
		// Decoding is CPU-bound; run it off the async executor.
		let peaks = spawn_blocking({
			let audio_path = audio_path.to_owned();
			move || compute_peaks(&audio_path)
		})
		.await??;
		let serialized = bitcode::serialize(&peaks).map_err(Error::PeaksSerialization)?;
		tokio::fs::create_dir_all(&self.peaks_dir_path)
			.await
			.map_err(|e| Error::Io(self.peaks_dir_path.clone(), e))?;
		let path = self.get_peaks_path(audio_path);
		let mut out_file = tokio::fs::File::create(&path)
			.await
			.map_err(|e| Error::Io(path.clone(), e))?;
		out_file
			.write_all(&serialized)
			.await
			.map_err(|e| Error::Io(path.clone(), e))?;
		Ok(peaks)
	}

	// Hash of the audio file path, used as the cache file name.
	// NOTE(review): DefaultHasher output is not guaranteed stable across Rust
	// releases, so toolchain upgrades may invalidate cached peaks — confirm
	// this is acceptable.
	fn hash(path: &Path) -> u64 {
		let mut hasher = DefaultHasher::new();
		path.hash(&mut hasher);
		hasher.finish()
	}
}
fn compute_peaks(audio_path: &Path) -> Result<Peaks, Error> {
let peaks_per_minute = 4000;
let file =
std::fs::File::open(&audio_path).or_else(|e| Err(Error::Io(audio_path.to_owned(), e)))?;
let media_source = MediaSourceStream::new(Box::new(file), MediaSourceStreamOptions::default());
let mut peaks = Peaks::default();
peaks.interleaved.reserve(5 * peaks_per_minute);
let mut format = symphonia::default::get_probe()
.format(
&Hint::new(),
media_source,
&FormatOptions::default(),
&MetadataOptions::default(),
)
.map_err(Error::MediaProbeError)?
.format;
let track = format
.tracks()
.iter()
.find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
.ok_or_else(|| Error::MediaEmpty(audio_path.to_owned()))?;
let track_id = track.id;
let mut decoder = symphonia::default::get_codecs()
.make(&track.codec_params, &DecoderOptions::default())
.map_err(Error::MediaDecoderError)?;
let (mut min, mut max) = (u8::MAX, u8::MIN);
let mut num_ingested = 0;
loop {
let packet = match format.next_packet() {
Ok(packet) => packet,
Err(symphonia::core::errors::Error::IoError(e))
if e.kind() == std::io::ErrorKind::UnexpectedEof =>
{
break;
}
Err(e) => return Err(Error::MediaPacketError(e)),
};
if packet.track_id() != track_id {
continue;
}
let decoded = match decoder.decode(&packet) {
Ok(d) => d,
Err(_) => continue,
};
let num_channels = decoded.spec().channels.count();
let sample_rate = decoded.spec().rate;
let num_samples_per_peak =
((sample_rate as f32) * 60.0 / (peaks_per_minute as f32)).round() as usize;
let mut buffer = SampleBuffer::<u8>::new(decoded.capacity() as u64, *decoded.spec());
buffer.copy_interleaved_ref(decoded);
for samples in buffer.samples().chunks_exact(num_channels) {
// Merge channels into mono signal
let mut mono: u32 = 0;
for sample in samples {
mono += *sample as u32;
}
mono /= samples.len() as u32;
min = u8::min(min, mono as u8);
max = u8::max(max, mono as u8);
num_ingested += 1;
if num_ingested >= num_samples_per_peak {
peaks.interleaved.push(min);
peaks.interleaved.push(max);
(min, max) = (u8::MAX, u8::MIN);
num_ingested = 0;
}
}
}
Ok(peaks)
}

Some files were not shown because too many files have changed in this diff Show more