Compare commits

...

862 commits

Author SHA1 Message Date
Antoine Gersant
88792f0669 Adds typo fix to changelog 2025-02-05 18:02:14 -08:00
luzpaz
c2fb46f26d
Make Repology badge display 3 columns (#232)
More future-friendly format as the list expands
2025-02-05 17:59:07 -08:00
luzpaz
26ce7e2550
Fix various typos (#231)
Found via `codespell -q 3 -S "*.ai,*.rtf" -L ser,uptodate`
2025-02-05 17:58:04 -08:00
Antoine Gersant
46aed8096e Coverage setup fixes 2025-02-04 23:43:46 -08:00
Antoine Gersant
2aeed5f188 yaml fix 2025-02-04 23:39:04 -08:00
Antoine Gersant
41c4088477 Merge branch 'next' 2025-02-04 23:28:09 -08:00
Antoine Gersant
10946330a8 Adds support for multivalue w/ opus files 2025-02-04 22:19:05 -08:00
Antoine Gersant
2ce035f787 Adds more tests for multivalue field support 2025-02-04 21:24:47 -08:00
Antoine Gersant
956301bfdb Reuse file handle when calling mp3 duration 2025-02-04 01:07:27 -08:00
Antoine Gersant
f35c4efac3 Use id3 duration when available 2025-02-04 00:51:10 -08:00
Antoine Gersant
2a1c93c462 Skip superfluous syscall 2025-02-04 00:39:31 -08:00
Antoine Gersant
7625449434 Update ape dependency to new version 2025-02-03 20:58:28 -08:00
Antoine Gersant
1b0b5bd164 Fixed broken test 2025-02-03 18:10:43 -08:00
Antoine Gersant
809e3f878d Don't list genres with zero valid albums (ie. all songs in this genre have no artist / no album tag) 2025-02-03 01:50:06 -08:00
Antoine Gersant
6862cff185 Implement multi-value support for APE files 2025-02-03 01:09:10 -08:00
Antoine Gersant
36da2c3e70 Removed ydns from suggested ddns services 2025-02-02 19:23:47 -08:00
Antoine Gersant
9923b0f40c Cosmetic change 2025-02-02 14:56:18 -08:00
Antoine Gersant
1df3241ea2 Adds avenue to contribute 2025-02-02 13:54:44 -08:00
Antoine Gersant
77c313637f Adds contribution guidelines 2025-02-02 13:54:02 -08:00
Antoine Gersant
baa31f1056 Removed unused tasks file 2025-02-02 00:56:02 -08:00
Antoine Gersant
cf5d1b7931 - Adds project goals
- Updates feature list
- Readme polish
2025-02-02 00:49:59 -08:00
Antoine Gersant
87c4bdc247 Fixed a bug where tray icon didnt appear on Windows 2025-02-02 00:01:28 -08:00
Antoine Gersant
7c92e90d65 Adds example cover arts 2025-02-01 23:42:09 -08:00
Antoine Gersant
0eb1d7ee75 Updates readme screenshots 2025-02-01 23:41:55 -08:00
Antoine Gersant
dd53d8d284 Slightly more info on migration process 2025-01-31 19:25:12 -08:00
Antoine Gersant
98e10ad682 Fixed codecov badge link 2025-01-31 19:17:22 -08:00
Antoine Gersant
f8b30c4e3d Tentative fix for test coverage setup 2025-01-31 19:14:01 -08:00
Antoine Gersant
5a1753218c yaml fix 2025-01-31 19:02:21 -08:00
Antoine Gersant
646f8297d2 Tentative fix for test coverage setup 2025-01-31 19:01:26 -08:00
Antoine Gersant
d6416e0239 Fixed test failure on Windows 2025-01-31 18:02:13 -08:00
Antoine Gersant
6681322370 Fixed a bug where config file would fail to be created when parent directory does not exist 2025-01-31 17:38:42 -08:00
Antoine Gersant
f0cf3d2675 Use small-collection dataset for migration unit tests 2025-01-31 16:55:37 -08:00
Antoine Gersant
dac7145ce4 Removed swagger assets from makefile 2025-01-31 00:48:23 -08:00
Antoine Gersant
677413ef8c Allow file watch setups to fail 2025-01-31 00:45:03 -08:00
Antoine Gersant
fd3f877f93 Tarpaulin -> grcov 2025-01-31 00:33:39 -08:00
Antoine Gersant
95c7d1a620 Bumped old action 2025-01-31 00:28:01 -08:00
Antoine Gersant
ac88bc9af0 Update install check action 2025-01-31 00:22:00 -08:00
Antoine Gersant
7066e264cd Update toolchain setup action 2025-01-31 00:20:09 -08:00
Antoine Gersant
3cea551ce9 Update release script 2025-01-16 18:35:14 -08:00
Antoine Gersant
b1770fc17e Migrate to native_db 0.8.1 2025-01-16 00:14:53 -08:00
Antoine Gersant
f4009a7fa7 Log request methods 2025-01-16 00:09:29 -08:00
Antoine Gersant
d90b51f752 Fixed merge conflicts 2025-01-15 23:11:57 -08:00
Antoine Gersant
8ccc9cc2ee Utoipa polish 2025-01-15 23:07:48 -08:00
Antoine Gersant
4625bf221d Path param examples 2025-01-15 23:07:48 -08:00
Antoine Gersant
b940ca256b Adds endpoint descriptions 2025-01-15 23:07:48 -08:00
Antoine Gersant
24f27e4f08 Utoipa tag descriptions 2025-01-15 23:07:48 -08:00
Antoine Gersant
bd5aeaf591 API consistency improvements 2025-01-15 23:07:48 -08:00
Antoine Gersant
9707f4a96d Utoipa adds auth requirements 2025-01-15 23:07:48 -08:00
Antoine Gersant
2d92ac03ef Example values for DTO fields 2025-01-15 23:07:48 -08:00
Antoine Gersant
3f5e5eca69 Utoipa accept-version header 2025-01-15 23:07:48 -08:00
Antoine Gersant
dabb034964 Move docs endpoint to /api-docs 2025-01-15 23:07:48 -08:00
Antoine Gersant
cc2d2cedd8 Tag endpoints 2025-01-15 23:07:48 -08:00
Antoine Gersant
bbd63e1b42 Utoipa params iter 2025-01-15 23:07:48 -08:00
Antoine Gersant
df402ed7b8 Utoipa media endpoints 2025-01-15 23:07:48 -08:00
Antoine Gersant
b5a8aea1f8 Utoipa for search and playlist endpoints 2025-01-15 23:07:48 -08:00
Antoine Gersant
350557785c Utoipa collection endpoints 2025-01-15 23:07:48 -08:00
Antoine Gersant
07e8077a38 Utoipa for file browser endpoints 2025-01-15 23:07:48 -08:00
Antoine Gersant
23facd96b9 utoipa user management endpoints 2025-01-15 23:07:48 -08:00
Antoine Gersant
1c3ba3d709 utoipa more endpoints 2025-01-15 23:07:48 -08:00
Antoine Gersant
364710ef79 Utoipa auth endpoint 2025-01-15 23:07:48 -08:00
Antoine Gersant
2e2ddf017b Working utoipa setup 2025-01-15 23:07:47 -08:00
Antoine Gersant
1b142b1855 Utoipa hello world wip 2025-01-15 23:06:54 -08:00
Antoine Gersant
d47fffae4f Use upstream axum-range 2025-01-14 17:35:17 -08:00
Antoine Gersant
11c72240ed Dont log 3xx as error 2025-01-14 17:34:34 -08:00
Antoine Gersant
466bbf5cf3 Migrate to axum 0.8 2025-01-13 21:23:30 -08:00
Antoine Gersant
055a81e6f9 Lint 2025-01-13 21:16:23 -08:00
Antoine Gersant
81e0abc59f Flake update 2025-01-13 20:06:11 -08:00
Antoine Gersant
4826e6aa40 Dont log query parameters 2025-01-13 18:36:46 -08:00
Antoine Gersant
2521ff1ddf Log HTTP requests 2025-01-12 20:38:32 -08:00
Antoine Gersant
00236a99e3 Filter out symphonia log spam 2025-01-12 14:41:11 -08:00
Antoine Gersant
cfc848bf7c Mention migration process in changelog 2025-01-09 22:08:49 -08:00
Antoine Gersant
bf775ebc4c Playlist migration 2025-01-09 21:59:59 -08:00
Antoine Gersant
3ad5e97b75 Settings and auth secret migration 2025-01-07 21:51:43 -08:00
Antoine Gersant
73dc59f833 DB migration skeleton 2025-01-05 17:30:33 -08:00
Antoine Gersant
58d1af5edd Updated changelog 2025-01-02 14:35:17 -08:00
Antoine Gersant
ff7291a246 Adds test for multivalue fields 2025-01-02 12:47:00 -08:00
Antoine Gersant
cfa2cedbc1 Test accented character sorting in search results 2025-01-02 00:13:18 -08:00
Antoine Gersant
b9bcdd46b1 Faster song sorting using dictionary ordering 2025-01-01 23:45:52 -08:00
Antoine Gersant
f371d5e331 Support for accented characters when sorting songs 2025-01-01 23:25:13 -08:00
Antoine Gersant
41187199ba Moved song sorting to collection.rs 2025-01-01 16:46:14 -08:00
Antoine Gersant
de39b2f4a5 Search index build optimizations:
- For ascii bigrams, store song occurrences in a vec instead of a hashmap to save on hashing costs (~10% faster). Use ascii values to build vec indices.
- For all bigrams, replace IntMaps with Vec. This allows the same song to be counted multiple times for one bigram, but saves a huge amount of hashing cost (~30%)
2025-01-01 15:57:33 -08:00
Antoine Gersant
cb241d21dd Handle accented characters when sorting more collection results 2025-01-01 13:54:02 -08:00
Antoine Gersant
21d7e3049e Use thin LTO 2025-01-01 13:33:47 -08:00
Antoine Gersant
68b8041f97 Sorting for accented characters 2025-01-01 13:31:11 -08:00
Antoine Gersant
e8845c7ef9 Changelog adjustments 2024-10-13 20:04:29 -07:00
Antoine Gersant
b7719edd8e Updated changelog 2024-10-13 20:02:49 -07:00
Antoine Gersant
ed546ed531 Setup guide tweaks 2024-10-13 00:24:10 -07:00
Antoine Gersant
c640086a3e Cleanup 2024-10-13 00:11:33 -07:00
Antoine Gersant
cf6a092ab7 Setup file watches while indexing is happening 2024-10-12 17:15:22 -07:00
Antoine Gersant
768ea095e1 Skip redundant watch setup 2024-10-12 16:56:20 -07:00
Antoine Gersant
5a5f696366 Reset song count when indexing starts 2024-10-12 16:38:53 -07:00
Antoine Gersant
d1d12aecc5 Adds scanner auto-trigger test 2024-10-12 16:23:31 -07:00
Antoine Gersant
ea75497bf1 Rescan collection when content changes 2024-10-12 16:16:01 -07:00
Antoine Gersant
8100dfceae Replaced channel with notifu 2024-10-12 16:08:27 -07:00
Antoine Gersant
f955eb75c5 Automatically reindex when relevant config changes are made 2024-10-12 14:53:13 -07:00
Antoine Gersant
0a7ae8ebad Adds index status endpoint 2024-10-12 00:19:14 -07:00
Antoine Gersant
090ca387ab Async cleanup 2024-10-11 21:34:36 -07:00
Antoine Gersant
d53681b6c0 Cleanup 2024-10-11 21:21:35 -07:00
Antoine Gersant
d555a2e5f0 Watch config file changes 2024-10-11 21:20:16 -07:00
Antoine Gersant
142d400b8b Create config file on startup 2024-10-11 20:09:39 -07:00
Antoine Gersant
5f585a61d8 Doc updates 2024-10-11 20:04:37 -07:00
Antoine Gersant
08052c25a3 Preserve order of mount points and users 2024-10-09 23:48:22 -07:00
Antoine Gersant
497b3bb545 Allow clearing DDNS url 2024-10-09 17:29:45 -07:00
Antoine Gersant
32e67dc095 DDNS polish 2024-10-09 16:45:50 -07:00
Antoine Gersant
8b31698cf4 Enable fat LTO in release builds 2024-10-09 16:12:29 -07:00
Antoine Gersant
7a84cc0290 Fixed a bug where blank DDNS url turned into '/' 2024-10-09 14:53:42 -07:00
Antoine Gersant
524e072e9f Fixed typo 2024-10-09 11:12:44 -07:00
Antoine Gersant
5ec0b5f7a5 Write config changes to disk 2024-10-08 23:38:11 -07:00
Antoine Gersant
fb18cb3c4f Test config round trip 2024-10-08 23:03:15 -07:00
Antoine Gersant
0058221e88 Fixed duplicate user test triggering a different error 2024-10-08 22:30:44 -07:00
Antoine Gersant
51283d935f Cleanup 2024-10-08 22:29:09 -07:00
Antoine Gersant
a4e9aea1e4 Read config from disk 2024-10-08 22:28:10 -07:00
Antoine Gersant
7f39d8e8b7 Boilerplate 2024-10-08 22:19:57 -07:00
Antoine Gersant
316f5c0219 Service agnostic DDNS 2024-10-08 21:59:40 -07:00
Antoine Gersant
deeb3e8a05 Cleanup 2024-10-08 20:50:00 -07:00
Antoine Gersant
ae5da0f4f3 Config refactor continued 2024-10-08 20:48:26 -07:00
Antoine Gersant
c7a760e2c2 Cleanup 2024-10-07 23:09:10 -07:00
Antoine Gersant
471e39495c Cleanup 2024-10-07 23:08:49 -07:00
Antoine Gersant
67730f55fb Cleanup 2024-10-07 23:07:53 -07:00
Antoine Gersant
1555c784de Config users refactor 2024-10-07 23:05:35 -07:00
Antoine Gersant
c51ce59fba Removed preferences 2024-10-07 18:08:36 -07:00
Antoine Gersant
a89e3d5145 WIP 2024-10-06 23:12:57 -07:00
Antoine Gersant
658c23e70d Removed /config endpoint 2024-10-06 18:21:28 -07:00
Antoine Gersant
053b684f3a Promote partial collection index during initial scan 2024-10-06 14:27:38 -07:00
Antoine Gersant
1a8bf91628 Index disk serialization without DB 2024-10-06 12:50:39 -07:00
Antoine Gersant
a5061dfc92 Removed last.fm support 2024-10-06 00:29:23 -07:00
Antoine Gersant
2c2b12f536 Upsert playlists 2024-10-05 21:15:12 -07:00
Antoine Gersant
f0a2afe01d Sort playlists alphabetically 2024-10-05 21:13:59 -07:00
Antoine Gersant
9e18a221db Adds polaris.ndb to gitignore 2024-10-05 20:18:47 -07:00
Antoine Gersant
e42c3abfe1 Remove accidental artifact 2024-10-05 20:18:27 -07:00
Antoine Gersant
765de35f89 Playlist DTO 2024-10-05 20:17:20 -07:00
Antoine Gersant
98bcd41e43 Async playlist operations 2024-10-05 20:11:50 -07:00
Antoine Gersant
369bf3821b Introduces data_dir 2024-10-05 20:04:39 -07:00
Antoine Gersant
664ff721e2 ndb playlists first pass 2024-10-04 20:43:53 -07:00
Antoine Gersant
b175e319b7 Introduces playlist header 2024-10-04 18:02:32 -07:00
Antoine Gersant
76535b2f87 Adds tests for genre indexing 2024-10-01 21:20:54 -07:00
Antoine Gersant
d1a0b836cf Adds recently added and main artists to genre payload 2024-09-30 23:12:10 -07:00
Antoine Gersant
071aced10a Adds genre endpoint tests 2024-09-30 18:59:09 -07:00
Antoine Gersant
7f3e091e32 More granular test splitting 2024-09-30 18:51:22 -07:00
Antoine Gersant
232eb7ac12 Index related genres 2024-09-30 00:27:49 -07:00
Antoine Gersant
143da76673 Genre album/artists endpoints 2024-09-29 18:46:10 -07:00
Antoine Gersant
8d51344dc3 Index artists by genre 2024-09-29 16:28:53 -07:00
Antoine Gersant
bff82c3a7c Fixed artifact paths 2024-09-29 15:09:41 -07:00
Antoine Gersant
454b4c00fc Troubleshooting 2024-09-29 15:09:41 -07:00
Antoine Gersant
2bbfa064d5 Fail when artifacts are missing 2024-09-29 15:09:41 -07:00
Antoine Gersant
f1e21a4f6e CI churn 2024-09-29 15:09:41 -07:00
Antoine Gersant
9e62dc108c Repair tagging 2024-09-29 15:09:41 -07:00
Antoine Gersant
2992ef89b8 Fixed job dependencies 2024-09-29 15:09:41 -07:00
Antoine Gersant
4548574298 Syntax fixes 2024-09-29 15:09:41 -07:00
Antoine Gersant
72f4604f7a More CI churn 2024-09-29 15:09:41 -07:00
Antoine Gersant
646a8fa587 CI churn 2024-09-29 15:09:41 -07:00
Antoine Gersant
647e1d5614 Changelog for release 0.14.3 2024-09-29 15:09:08 -07:00
Antoine Gersant
c1c0cedccc Release script version churn 2024-09-29 15:08:23 -07:00
Antoine Gersant
d06ad07f51 Fixed artifact paths 2024-09-29 14:25:34 -07:00
Antoine Gersant
f188b2943f Troubleshooting 2024-09-29 13:43:24 -07:00
Antoine Gersant
e3041fca6f Fail when artifacts are missing 2024-09-29 13:39:27 -07:00
Antoine Gersant
444d261d0b CI churn 2024-09-29 13:19:37 -07:00
Antoine Gersant
47c73f6196 Repair tagging 2024-09-29 13:06:34 -07:00
Antoine Gersant
63b92718d5 Fixed job dependencies 2024-09-29 12:55:58 -07:00
Antoine Gersant
e11344d2b4 Syntax fixes 2024-09-29 12:54:50 -07:00
Antoine Gersant
0b50a10a36 More CI churn 2024-09-29 12:51:29 -07:00
Antoine Gersant
6a46aaeac6 CI churn 2024-09-29 12:46:17 -07:00
Antoine Gersant
7ae10c6f74 Changelog for release 0.14.3 2024-09-29 12:23:35 -07:00
Antoine Gersant
5d03b7919c Release script version churn 2024-09-29 12:21:08 -07:00
Antoine Gersant
6c2b192f8e Genre key cleanup 2024-09-29 12:15:32 -07:00
Antoine Gersant
ef6951faba Artist key cleanup 2024-09-29 12:14:36 -07:00
Antoine Gersant
e06f79c500 Genre endpoints WIP 2024-09-29 12:06:39 -07:00
Antoine Gersant
cb35ef0ebb Reserve ! character 2024-09-25 17:27:14 -07:00
Antoine Gersant
f21f906eaf Fixed a bug where search results were capped at 200 songs 2024-09-25 17:25:01 -07:00
Antoine Gersant
b943d9aa11 Adds NOT search operator 2024-09-25 17:22:40 -07:00
Antoine Gersant
f971b78856 Skip inoperable filters 2024-09-23 21:00:28 -07:00
Antoine Gersant
971b46be45 Store search index fields in array 2024-09-23 20:35:16 -07:00
Antoine Gersant
ee3f9fd5a0 Use disc number when sorting search results 2024-09-22 20:18:01 -07:00
Antoine Gersant
b5762bd7bf Sort search results 2024-09-22 20:05:20 -07:00
Antoine Gersant
99263ddeca Fixed tests for /search endpoint 2024-09-22 14:19:52 -07:00
Antoine Gersant
be97bccab1 Number fields search 2024-09-22 14:19:52 -07:00
Antoine Gersant
0fe3555560 Avoid false positives when all bigrams match 2024-09-22 14:19:52 -07:00
Antoine Gersant
bdc4f840a4 Case insensitive search 2024-09-22 14:19:52 -07:00
Antoine Gersant
409d79d8a2 Additional search tests 2024-09-22 14:19:52 -07:00
Antoine Gersant
390ee03020 Small perf improvement to search index building 2024-09-22 14:19:52 -07:00
Antoine Gersant
cb33c96548 Adds TODO 2024-09-22 14:19:52 -07:00
Antoine Gersant
5128796825 Fixed false positives in search results 2024-09-22 14:19:52 -07:00
Antoine Gersant
81403960b0 Skip allocations 2024-09-22 14:19:52 -07:00
Antoine Gersant
5e8587c39f Search indexing WIP 2024-09-22 14:19:52 -07:00
Antoine Gersant
e5339ab39a End unquoted literals on reserved symbols 2024-09-22 14:19:52 -07:00
Antoine Gersant
caf12f23b4 Avoid .or() for faster compile times 2024-09-22 14:19:52 -07:00
Antoine Gersant
9a14114e50 Parenthesis and implicit AND support 2024-09-22 14:19:52 -07:00
Antoine Gersant
83b5431994 Boolean operators parsing 2024-09-22 14:19:52 -07:00
Antoine Gersant
b96cd2d781 Search syntax first pass 2024-09-22 14:19:52 -07:00
Antoine Gersant
bc17954db9 Fixed borked tests 2024-09-20 23:56:35 -07:00
Antoine Gersant
e5a8c325a6 Updated changelog 2024-09-18 22:51:23 -07:00
Antoine Gersant
625f4bd006 Adds seed support to get_random_albums 2024-09-18 21:19:59 -07:00
Antoine Gersant
ae4200c6ce Adds support for offset and count parameters in get_recent_albums 2024-09-17 22:27:39 -07:00
Antoine Gersant
6bd0c25d7d Fixed a bug where artists with no album could sneak into collection 2024-09-17 22:27:05 -07:00
Antoine Gersant
2b81355f6d Adds get_albums endpoint 2024-09-16 23:25:41 -07:00
Antoine Gersant
e65cee366d Artist schema iteration 2024-09-08 13:46:47 -07:00
Antoine Gersant
ae876915b4 Adds TODO 2024-09-07 17:44:25 -07:00
Antoine Gersant
7be9f25cb3 Drop support for blank album names 2024-09-07 16:22:39 -07:00
Antoine Gersant
4072e3b07d Adds num_songs_by_genre to artist details 2024-09-07 15:37:17 -07:00
Antoine Gersant
54ce646931 Merge values that only differ by cosmetic characters 2024-09-07 13:25:16 -07:00
Antoine Gersant
e0bf259be3 Track num songs by artist 2024-09-06 19:20:21 -07:00
Antoine Gersant
07324ccca6 Track num_songs_by_genre 2024-09-06 01:34:41 -07:00
Antoine Gersant
c1f24ce96b Artist indexing test 2024-09-05 23:11:26 -07:00
Antoine Gersant
0c12729983 Artist indexing 2024-09-05 23:08:40 -07:00
Antoine Gersant
ad37a14cfa Artist list merges case divergences, excludes VA, reports more album info 2024-09-04 23:54:58 -07:00
Antoine Gersant
309620a088 Adds compact/large mode to changelog 2024-09-04 20:14:25 -07:00
Antoine Gersant
1e0a6062f9 Trailing slash normalization 2024-09-04 18:10:21 -07:00
Antoine Gersant
85cacd8bb7 Mentioned addition of new thumbnail size 2024-09-04 01:21:31 -07:00
Antoine Gersant
7c5ff2e895 Updated changelog 2024-09-03 01:18:22 -07:00
Antoine Gersant
afc5fcb4c2 Async support for thumbnails and peaks 2024-09-02 13:57:25 -07:00
Antoine Gersant
9a30065971 Adds new endpoint to generate audio waveforms 2024-09-02 13:27:46 -07:00
Antoine Gersant
f4b0cb9eb7 Fixed lint 2024-08-27 22:58:37 -07:00
Antoine Gersant
e703f69a48 Adds support for tiny thumbnails 2024-08-25 20:05:27 -07:00
Antoine Gersant
57a0163c04 Adds TODO 2024-08-25 15:57:33 -07:00
Antoine Gersant
5444285327 Adds endpoint to retrieve song metadata in bulk 2024-08-25 15:28:16 -07:00
Antoine Gersant
6837994433 Return first 200 songs when returning a list of songs 2024-08-24 23:28:22 -07:00
Antoine Gersant
8141e565e0 Added TODO 2024-08-24 20:58:17 -07:00
Antoine Gersant
a3c2b3bc32 Compress static files 2024-08-16 19:16:28 -07:00
Antoine Gersant
570c2b3894 Browse now skips top-level when it only has one mount 2024-08-15 21:41:05 -07:00
Peder Bergebakken Sundt
f625c57d20 update crate time from 0.3.28 to 0.3.36
Fixes build with rust 1.80.0
2024-08-13 09:17:13 -07:00
Antoine Gersant
d492afc885 Flatten perf improvements: gzip response and parallelize sorting 2024-08-10 11:38:29 -07:00
Antoine Gersant
4112c7d79d Sorting improvements 2024-08-10 10:57:07 -07:00
Antoine Gersant
39407c6551 async cleanup 2024-08-10 10:31:53 -07:00
Antoine Gersant
0afab8d634 Implements artists/ endpoint 2024-08-10 10:30:21 -07:00
Antoine Gersant
bc3ed59382 Adds collection tests 2024-08-09 23:04:55 -07:00
Antoine Gersant
636803c0df Sort albums by year first 2024-08-09 22:01:02 -07:00
Antoine Gersant
a7c4c90427 Cosmetic changes 2024-08-09 20:02:23 -07:00
Antoine Gersant
91152fdc08 Removed unused field 2024-08-09 20:00:50 -07:00
Antoine Gersant
1bbeee7f39 Fixed a bug where recent albums were not correctly sorted 2024-08-09 19:43:30 -07:00
Antoine Gersant
6564e7d078 Cleaned collection tests 2024-08-09 18:43:13 -07:00
Antoine Gersant
41c043f863 Cleaned scanner tests 2024-08-09 18:02:15 -07:00
Antoine Gersant
3f645d1011 Repair playlists 2024-08-09 17:40:59 -07:00
Antoine Gersant
6b5c291cb7 Clean up browser tests 2024-08-09 17:22:41 -07:00
Antoine Gersant
310e3b6c4d Fixed a bug where browser entries were not sorted 2024-08-09 16:28:30 -07:00
Antoine Gersant
a2232aa9f2 Semantic indexing for composer/lyricist 2024-08-09 13:02:49 -07:00
Antoine Gersant
0841c15f48 Avoid re-parsing regex 2024-08-09 12:26:40 -07:00
Antoine Gersant
763ba94e9b Single threaded rodeo 2024-08-09 12:11:25 -07:00
Antoine Gersant
a4baa2c792 Perf improvements 2024-08-09 11:24:53 -07:00
Antoine Gersant
e6483cf138 Organization 2024-08-09 10:59:59 -07:00
Antoine Gersant
b014c63af4 Fixed empty albums 2024-08-09 10:27:54 -07:00
Antoine Gersant
6821318a4d Intern strings in collection 2024-08-09 10:25:18 -07:00
Antoine Gersant
0a1f3fa78d Skip unnecessary allocations 2024-08-09 08:40:44 -07:00
Antoine Gersant
169b2b5cb8 Keep directory entries sorted as we add them 2024-08-09 08:30:10 -07:00
Antoine Gersant
782da35a7b Skip allocations 2024-08-09 08:27:09 -07:00
Antoine Gersant
2cbb249c46 Less aggressive polling 2024-08-09 08:16:47 -07:00
Antoine Gersant
2f2fdf9056 No longer refcount rodeo to avoid redundant serialization 2024-08-09 08:00:24 -07:00
Antoine Gersant
f0fa985f8a Intern strings in flattened 2024-08-04 19:25:39 -07:00
Antoine Gersant
6b1133e27c Intern browser directories 2024-08-04 19:07:10 -07:00
Antoine Gersant
8f6e72fbd6 Removed tarpaulin noise 2024-08-04 19:00:01 -07:00
Antoine Gersant
2c7eb9f643 Removed unused dependencies 2024-08-03 15:05:44 -07:00
Antoine Gersant
7a17cdc195 Rely on Axum to do percent decoding 2024-08-03 15:01:42 -07:00
Antoine Gersant
16434e6c51 Disable default ureq features 2024-08-03 14:55:02 -07:00
Antoine Gersant
5a14830138 Bump dependencies 2024-08-03 13:57:03 -07:00
Antoine Gersant
845105cf38 Fixed integration tests 2024-08-01 02:08:35 -07:00
Antoine Gersant
cd45836924 Error types consolidation 2024-08-01 00:09:21 -07:00
Antoine Gersant
8f2566f574 Refactor index 2024-07-31 23:38:38 -07:00
Antoine Gersant
a0624f7968 Flatten via trie 2024-07-31 18:00:26 -07:00
Antoine Gersant
7a1d433c8a Return album appearances 2024-07-31 17:07:44 -07:00
Antoine Gersant
ae9f94ce4f Removes MultiString 2024-07-31 16:47:12 -07:00
Antoine Gersant
e8af339cde Browsing via index (WIP) 2024-07-31 03:41:32 -07:00
Antoine Gersant
b4b0e1181f Indexing perf work 2024-07-31 01:43:13 -07:00
Antoine Gersant
72ec7b260a Index artists 2024-07-31 00:11:33 -07:00
Antoine Gersant
35736ee1d5 v7 compat for random/recent endpoints 2024-07-30 23:17:41 -07:00
Antoine Gersant
332e39876e Implements get_album endpoint 2024-07-30 00:24:25 -07:00
Antoine Gersant
b42c6d39e8 Serialize index into DB 2024-07-29 22:56:03 -07:00
Antoine Gersant
1f3cc1ea26 Rebuild index on startup 2024-07-29 21:54:07 -07:00
Antoine Gersant
8db6a2352b Adds ID trait 2024-07-29 20:03:25 -07:00
Antoine Gersant
93e8d7d94b Implement recent albums endpoint 2024-07-29 20:00:53 -07:00
Antoine Gersant
64ef7cb21f Index -> IndexManager 2024-07-29 18:13:40 -07:00
Antoine Gersant
2012258a72 Indexing WIP 2024-07-29 02:07:28 -07:00
Antoine Gersant
2965cbdf7e Index/Browser split 2024-07-28 23:15:26 -07:00
Antoine Gersant
efc27757c7 Updated changelog 2024-07-28 12:59:31 -07:00
Antoine Gersant
91352fc13b Cleanup 2024-07-28 02:34:35 -07:00
Antoine Gersant
470fbc6d1c Fixed toolchain setup 2024-07-27 22:23:49 -07:00
Antoine Gersant
9e9d031f4e Rename toolchain file 2024-07-27 22:20:27 -07:00
Antoine Gersant
caf6feea7a API versioning tests 2024-07-27 18:47:32 -07:00
Antoine Gersant
caa8907297 API versioning 2024-07-27 18:06:19 -07:00
Antoine Gersant
6871f41a99 Dev environment setup 2024-07-27 15:20:23 -07:00
Antoine Gersant
00cc18c798 Dev environment setup 2024-07-27 13:30:42 -07:00
Antoine Gersant
3362a828cd Split index into scanner (populates DB) and index (reads from DB) 2024-07-15 02:11:18 -07:00
Antoine Gersant
9d8d543494 Adds multi-value fields (single row) 2024-07-15 01:29:09 -07:00
Antoine Gersant
5a785a2e16 Update build script to follow removal of crate feature 2024-07-13 19:06:19 -07:00
Antoine Gersant
0f25a12877 Dependency bumps 2024-07-13 19:01:06 -07:00
Antoine Gersant
1c4ef6c5ee Dependency bumps 2024-07-13 18:48:55 -07:00
Antoine Gersant
1020f27413 Better migration error message 2024-07-13 18:28:51 -07:00
Antoine Gersant
0e63f64513 Range requests 2024-07-13 18:25:33 -07:00
Antoine Gersant
153943a3ae Add thumbnails endpoint 2024-07-13 17:58:31 -07:00
Antoine Gersant
d82563efc0 Adds playlist endpoints 2024-07-13 17:44:40 -07:00
Antoine Gersant
274a1f2cf7 Adds lastfm endpoints 2024-07-13 17:28:48 -07:00
Antoine Gersant
18858d8d1a Collection endpoints 2024-07-13 17:17:01 -07:00
Antoine Gersant
03d5568765 Implements more endpoints 2024-07-13 15:48:08 -07:00
Antoine Gersant
5c4631c673 Adds settings endpoints 2024-07-13 14:12:54 -07:00
Antoine Gersant
84921f7db3 Static file serving 2024-07-13 12:40:47 -07:00
Antoine Gersant
08353a717f Axum initial setup 2024-07-13 12:30:02 -07:00
Antoine Gersant
138886e55c Cosmetic change 2024-07-13 11:09:20 -07:00
Antoine Gersant
6884548cd0 Trim dependency features 2024-07-13 01:38:55 -07:00
Antoine Gersant
12a9f2ec3c Diesel -> SQLx 2024-07-13 01:20:27 -07:00
Antoine Gersant
138eacc9fc Merge branch 'master' of https://github.com/agersant/polaris 2024-07-10 23:17:41 -07:00
Antoine Gersant
11775d961b Lints 2024-07-10 23:17:38 -07:00
duydl
77dc2eac23
Add support for m4b format (#208)
* Add support for m4b

* Formatting

* Formatting

---------

Co-authored-by: Antoine Gersant <antoine.gersant@lesforges.org>
2024-05-09 19:59:45 -07:00
Antoine Gersant
7279793d25 0.14.2 changelog 2024-03-13 19:03:43 -07:00
Antoine Gersant
d4a427648e Fixed startup error related to system tray integration 2024-03-13 18:46:35 -07:00
Antoine Gersant
123eee7d2d Tentative fix for Linux install CI 2024-02-02 20:30:04 -08:00
Antoine Gersant
fd6a13083d Autoformat 2024-02-02 20:29:51 -08:00
Antoine Gersant
5ca38939bd Changelog for 0.14.1 2024-02-02 20:26:56 -08:00
Antoine Gersant
c1abd8fe3b Fixed musl linking error 2024-02-02 20:17:36 -08:00
Antoine Gersant
fc0a4fd6eb Tentative fix for linux build 2023-09-12 20:08:12 -07:00
Antoine Gersant
6f24ff248f Dependency bumps 2023-09-08 19:34:39 -07:00
Antoine Gersant
4807b2d3b9 Apply lints 2023-09-08 18:23:34 -07:00
Antoine Gersant
608dabb789 Removed tokio dependency 2023-09-08 18:20:12 -07:00
Peder Bergebakken Sundt
8d38c5b664
id3: -> 1.4.0(git) -> 1.7.0 (#194)
This removes the git dep, which since rust 1.68.0 is not reproducible.
2023-06-17 18:12:58 -07:00
Etienne Dechamps
f6d45c8387
Demote DDNS disabled message to avoid log spam (#188)
Fixes #187
2023-02-11 15:53:07 -08:00
Elise
930fd67ae3
Add a link to Polarios in the README (#183) 2023-01-05 13:21:10 -08:00
Antoine Gersant
00b6444048 Adds social media preview image for Github links 2022-11-27 20:22:26 -08:00
Antoine Gersant
bd330ddd84 Make patch test coverage informational 2022-11-24 20:38:19 -08:00
Antoine Gersant
46a232219d Update changelog for release 0.14.0 2022-11-24 20:27:09 -08:00
Antoine Gersant
a8660793f8 Use TDOR frame for ID3v2 to populate year 2022-11-24 20:22:35 -08:00
Antoine Gersant
eaec68dff0 Specified API errors 2022-11-21 21:37:47 -08:00
Antoine Gersant
1484ecabe9 Log error details instead of sending them in HTTP responses 2022-11-21 18:37:55 -08:00
Antoine Gersant
1812bedfd2 Fixed a bug where systemd init error would not display 2022-11-21 17:31:12 -08:00
Antoine Gersant
c57583d1d4 Removed anyhow dependency 2022-11-21 17:23:14 -08:00
Antoine Gersant
98d00d261d Remove some usage of anyhow 2022-11-21 17:00:15 -08:00
Antoine Gersant
edc7170b89 Removed unused import 2022-11-21 16:53:32 -08:00
Antoine Gersant
e4959be2f4 metadata module error cleanup 2022-11-21 16:51:31 -08:00
Antoine Gersant
fee2f17fb1 Error cleanup 2022-11-21 16:45:18 -08:00
Antoine Gersant
4c5a6bc2d6 Error cleanup 2022-11-21 16:31:49 -08:00
Antoine Gersant
1e9d307a05 Error cleanup 2022-11-21 16:06:18 -08:00
Antoine Gersant
4ec8f2161b Error cleanup 2022-11-21 16:00:22 -08:00
Antoine Gersant
f609afc5ed Structured errors continued 2022-11-21 15:33:50 -08:00
Antoine Gersant
9f0bc06dac Bump API version 2022-11-17 22:31:19 -08:00
Antoine Gersant
d1cb328523 Migrated changelog to a plain text file 2022-11-14 20:57:23 -08:00
Antoine Gersant
33997fc8e1 Trigger demo deployment on release 2022-11-14 02:06:56 -08:00
Antoine Gersant
602c1c03b5 Added demo to readme 2022-11-14 00:46:14 -08:00
Antoine Gersant
f3abb816ff Fixed a bug where all music sources would be deleted when trying to add sources with duplicate names 2022-11-12 14:07:01 -08:00
Antoine Gersant
96d702b79e Improve build times 2022-11-10 01:56:46 -08:00
Antoine Gersant
223894c2b6 Merge branch 'master' of https://github.com/agersant/polaris 2022-11-09 01:23:32 -08:00
Antoine Gersant
bb8d1142d6 Defined a few unspecified errors 2022-11-09 01:23:23 -08:00
Antoine Gersant
822f3ed073 Merged trivial modules 2022-11-09 00:39:48 -08:00
Antoine Gersant
2873f38e04 Merged trivial modules 2022-11-09 00:33:57 -08:00
Antoine Gersant
388901cf65 Moved manager.rs file contents to parent modules 2022-11-09 00:14:52 -08:00
Antoine Gersant
df0de19567 Renamed mod.s rs files 2022-11-08 23:53:02 -08:00
Tobias Schmitz
29ae862aad
Update dependencies (#176)
* update dependencies

* Upgrade rust edition to 2021

* make actix-test a dev-dependency
2022-11-08 19:54:24 -08:00
Antoine Gersant
a5f5a77100 Linter suggestions 2022-11-08 02:04:54 -08:00
Antoine Gersant
63e971059a Removed deprecated authentication methods 2022-11-08 02:01:20 -08:00
Antoine Gersant
d41e837561 Linter suggestions 2022-11-08 01:21:26 -08:00
Tobias Schmitz
f5a2eed423
Migrate to diesel 2.0 (#174) 2022-08-30 11:47:16 -07:00
Tobias Schmitz
41a4b21327
Fix clippy warnings (#175) 2022-08-29 21:17:03 -07:00
Tobias Schmitz
374d0ca56f
Migrate to actix-web 4 (#171)
* Migrate to actix-web 4

* Change expected swagger test status code

* update tokio to 1.0

* fix clippy warnings
2022-04-24 13:55:38 -07:00
Tobias Schmitz
90fd6bbcc9
Update dependencies (#166) 2022-03-20 19:50:14 -07:00
Antoine Gersant
39c8cf7595 Thumbnail and audio endpoints no longer encode payloads 2021-11-28 20:13:54 -08:00
Antoine Gersant
f27bc4ccfc Added TODO 2021-11-27 16:45:25 -08:00
Antoine Gersant
818dfe877c Update codecov github action 2021-11-14 14:17:36 -08:00
Antoine Gersant
b6e9940c76 Updated id3 dependency 2021-11-10 19:47:45 -08:00
pmphfm
e2bf97db99
Code cleanup (#148)
Fixed almost all clippy warnings.
Test: cargo test && cargo clippy
2021-10-19 19:31:17 -07:00
Tobias Schmitz
d01583b406
add api parameter for thumbnail size (#144)
* add api parameter for thumbnail size

* make max_dimension optional in case of native resolution

* add tests for thumbnail size

* fix typo

* fix thumbnail size tests

* make unwrap more explicit

* remove print statement

* update workflows

* reduce thumbnail variations

* add removed token

* Update coverage.yml

* fix typo

* hopefully prevent coverage timeout

- split up thumbnail tests
- reduce threadcount used for test execution

* get thread count using github actions specific step

* use fixed thread count of 4

* run coverage tests in release mode

* ignore large and native thumbnail_size tests in coverage
2021-06-05 02:24:25 -07:00
pmphfm
f104355076
Add few more fields to song information (#141)
* [meta] Add ignore paths to vscode settings

* [feature] Add few more fields to song information

Fields include lyricist, composer, genre, category
and label.
2021-05-20 22:08:43 -07:00
Antoine Gersant
4c25195deb
Updated list of supported formats 2021-04-27 21:43:28 -07:00
gahag
ed581c57cf
Add support for AIFF files (#137)
The new patch in rust-id3 fixes the AIFF API, which is now used to support AIFF files.
2021-04-24 22:05:52 -07:00
gahag
652772ba0e
Implement support for Wave files (#135)
* Implement support for Wave files

Metadata extraction for such format is supported by the latest version of rust-id3, which
has been updated in this commit. The code has been updated to handle such files and call
the new APIs.

* Code review
2021-04-19 21:49:23 -07:00
David Futcher
6c27409ef2
Bump rustfm-scrobble dependency to v1.1.1 (#129) 2021-01-13 19:39:58 -08:00
Antoine Gersant
7a73ae7cc0 Don't emit log file when running in foreground (-f on Linux, polaris-cli.exe on Windows) and --log is not set 2021-01-02 16:03:51 -08:00
Antoine Gersant
2f71cf2db7 Checkout release branch when making a release 2020-12-30 22:56:26 -08:00
Antoine Gersant
4ad8d922f7
Platform-specific improvements (#127)
* Use native-windows-gui crate to manage tray icon
Adds log file support on Windows

* Log file location now works like other paths

* Removed context builder

* Context --> App

* Removed mount URLs from App

* Switch to a nicer crate for forking daemon

* Handle errors from notify_ready

* Add application icon to all Windows Polaris executables, not just those created by the release script

* Add build.rs to release tarball

* Create PID file parent directory if necessary
2020-12-30 21:41:57 -08:00
Antoine Gersant
7edcc38483
Test setup improvements (#125)
* More descriptive test names

* Test writing tools

* Migrate to new test tooling

* Adds test for collection cleaner
2020-12-29 20:05:04 -08:00
Antoine Gersant
7bc8e142c3 Fixed a bug where missing content was not removed from database 2020-12-29 16:14:02 -08:00
Antoine Gersant
487d261843 Removed unnecessary dependency 2020-12-27 16:39:48 -08:00
Antoine Gersant
ea7edea79b Workaround for broken install paths on Windows 2020-12-27 00:10:57 -08:00
Antoine Gersant
5ec2ae3f7a Merge branch 'master' of https://github.com/agersant/polaris 2020-12-26 23:23:34 -08:00
Antoine Gersant
cab03f2538 Tentative fix for Windows installer not working 2020-12-26 23:23:10 -08:00
Antoine Gersant
1d57691e8b
Thumbnail fixes (#121)
* Use dedicated thumbnail generation algorithm

* Fixed a bug where some images could not be encoded to JPG due to unsupported pixel formats
2020-12-20 03:27:20 -08:00
Antoine Gersant
72c8ed9289
Support for bearer token authentication (#120)
* User manager can create and recognize auth tokens

* Implement HTTP bearer auth

* Use bearer auth in test-harness

* Can receive auth token via query parameter (useful for media endpoints)
2020-12-20 03:25:45 -08:00
Antoine Gersant
5e065c5e6a
Split configuration module (#117) 2020-12-18 01:14:24 -08:00
Antoine Gersant
e5c1d86577 Last FM unlink now correctly unsets credentials. Missing credentials no longer 401 2020-12-17 19:14:05 -08:00
Antoine Gersant
d4c78a0a31 Fixed a bug where only swagger index could be accessed without trailing slash 2020-12-15 21:12:00 -08:00
Antoine Gersant
ec39e696bb Fixed test on linux 2020-12-15 02:19:49 -08:00
Antoine Gersant
5b2d0a2216 Cosmetic change 2020-12-15 01:48:14 -08:00
Antoine Gersant
404d42d254 album art pattern is now case insensitive 2020-12-15 01:44:46 -08:00
Antoine Gersant
bd48ad1a5c Adds jpeg to default album art filter 2020-12-15 01:32:58 -08:00
Antoine Gersant
c2807b60de
Replace rocket with actix-web (#116)
* Adds actix dependency

* Failed attempt at test harness using actix

* Fixed test panic

* Simplified tests

* Run web server in tests

* Send json payloads

* Static file serving

* Default shutdown timeout

* Implement version endpoint

* Implements #[get("/initial_setup")]

* WIP put_settings endpoint

* Adds AdminRights extractor

* Fixed a bug where AdminRights extractor always failed

* Implements collection endpoints

* Re-use system runner between calls

* Preserve client headers between API calls (tentative)

* Fixed test interferences

* Implemented more endpoints

* Implemented audio file serving

* Fixed sketchy responses

* Implements thumbnail endpoint

* Login endpoint WIP

* Implement login endpoint

* Auth support

* When using HTTP headers to authenticate, response now includes expected cookies

* Tentative fix for server not responding within docker

* Adds logging middleware + browse troubleshooting

* Tentative fix for path decoding issues

* Tentative fix for broken path decoding

* Fix routing issues w/ paths

* Fixed a bug where auth cookies were sent in every response

* More lenient test timeouts

* Fixed a bug where recent/random endpoints required trailing slashes

* Compilation fix for rocket branch

* More useful test matrix

* Signed session cookies (#106)

* Isolate conflicting dependencies between rocket and actix versions

* Removed macOS from test matrix

* Glorious test harness simplification

* Removed RequestBuilder

* Shutdown on ctrl+c

* Pin to stable

* Drop rocket

* Simplify dependencies

* Removed stray rocket dependency

* Better test matrix

* Skip windows build without bundled sqlite

* Offload thumbnail creation to a thread pool

* Compress responses when possible

* Removed unused manage state

* Fixed a bug where large playlists could not be saved

* Return HTTP 401 for last fm requests without authentication

* Web block (#115)

* web::block around DB operations

* web::block during auth utils hitting DB

* Fixed incorrect http response code for missing thumbnail

* Removed unnecessary unwrap

* Eliminated unnecessary unwrap
2020-12-14 21:18:44 -08:00
Antoine Gersant
6be6d2a7dc Cosmetic change 2020-12-14 19:32:10 -08:00
Antoine Gersant
e25af0e9b5 Fixed issue where releases may not get their git tag 2020-12-13 20:05:42 -08:00
Antoine Gersant
2c21609699 Fetch URL for uploading release assets 2020-12-13 19:44:41 -08:00
Antoine Gersant
847d61f62b
One click release (#113)
* Make release from Github UI
2020-12-13 19:24:06 -08:00
Antoine Gersant
9c45ad5238
Replace reqwest with ureq (#112)
* Replace reqwest with ureq

* Reqwest-free rustfm-scrobble
2020-12-13 15:53:31 -08:00
Antoine Gersant
dd92d3e6eb
Added license badge 2020-12-12 15:31:41 -08:00
Antoine Gersant
7e4c0fa610 Support for Github dark theme 2020-12-11 02:54:18 -08:00
Antoine Gersant
34e0538562
App features re-organization (#111) 2020-12-10 03:23:00 -08:00
Antoine Gersant
866d82a16c
Make sqlite bundling optional (#110)
* Manually specify libsqlite version

* Make sqlite bundling optional

* Skip tests on windows without bundled sqlite
2020-12-08 01:22:17 -08:00
Antoine Gersant
454d73c754 cargo update 2020-12-08 00:22:16 -08:00
Antoine Gersant
7477158891 Merge branch 'master' of https://github.com/agersant/polaris 2020-12-08 00:16:21 -08:00
Antoine Gersant
55a4f64b3a
Bundle SQLite (#109) 2020-12-08 00:14:16 -08:00
Antoine Gersant
eb917bb9d6 Bumped pbkdf2 2020-12-07 23:35:33 -08:00
Antoine Gersant
e40121c4d0 Bumped crossbeam channel version 2020-12-07 23:33:37 -08:00
Antoine Gersant
0e52047417 Fixed typo 2020-12-07 23:31:05 -08:00
Antoine Gersant
1ebc0d9f44 Bump base64 version 2020-12-07 23:30:51 -08:00
Antoine Gersant
b709a8cd64 Bump anyhow version 2020-12-07 23:30:00 -08:00
Antoine Gersant
5b412718dc Trim diesel features 2020-12-07 23:24:57 -08:00
Antoine Gersant
b678973ef0 Hand-picked subset of image features 2020-12-07 23:06:50 -08:00
Antoine Gersant
351f1b0768 Removed unused dependency 2020-12-07 22:51:36 -08:00
Antoine Gersant
eef75163ce Include toolchain file in release tarball 2020-12-07 22:29:34 -08:00
Antoine Gersant
33121bc0a3
Pin rust toolchain version (#108) 2020-12-07 22:08:37 -08:00
Antoine Gersant
2f67d280fa Removed now standard build-override (https://github.com/rust-lang/cargo/pull/8500) 2020-12-07 21:15:33 -08:00
Antoine Gersant
f03d12de3e Removed unused dependency 2020-12-07 20:10:10 -08:00
Antoine Gersant
b6c446fa02
Rewrote indexer (#107)
* Update index without rayon

* Use crossbeam channels

* Use a single thread for DB insertions

* Better use of rayon in clean()

* Index rewrite

* Parallelize traverser

* Don't swallow send error

* Use Drop trait to flush Inserter work

* Configurable number of traverser threads

* Use channels to manage the work queue instead of Mutex

* Removed unusable profiling feature
2020-12-07 20:07:10 -08:00
Antoine Gersant
8524c7d5fe More accurate test for web client serving 2020-12-06 02:48:42 -08:00
Antoine Gersant
a3f7a306e5 Added todo 2020-12-05 21:19:12 -08:00
Antoine Gersant
7ea97b0abf More conservative workflow config 2020-12-05 20:38:01 -08:00
Antoine Gersant
8d2ed31fef Validate that session cookie looks somewhat encrypted 2020-12-05 16:53:46 -08:00
Antoine Gersant
0930ef45bb test_swagger_can_get_index_with_trailing_slash 2020-12-03 23:00:16 -08:00
Antoine Gersant
f80a42e666
Trailing slash tests (#105)
* Better test names

* Add tests for recent/random endpoints with trailing slash
2020-12-03 21:31:14 -08:00
Antoine Gersant
2eed57cc47 Added tests that auth cookie headers are not emitted in all requests 2020-12-03 01:33:32 -08:00
Antoine Gersant
e1934a8e92
Cleaned up startup code (#104) 2020-11-30 20:27:39 -08:00
Antoine Gersant
847c26ddfe
Service unit tests improvements (#103)
- Simpler API for TestService
- More granular tests
- Tests for authentication requirements
- Better error handling (and HTTP response codes) for various bad inputs
2020-11-30 01:26:55 -08:00
Antoine Gersant
1ffea255df Avoid unnecessary copies 2020-11-26 19:30:42 -08:00
Antoine Gersant
875a52f1b2
Clarified uninstall instructions 2020-11-26 16:04:24 -08:00
Antoine Gersant
538b41a2b4
Use standard directories when running on Linux (#91)
* Use standard Linux directories for application data (https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard)

* Use standard system directories

* Cleanup all Polaris files during uninstall

* Expose get_pid_directory to rest of the crate

* Add separate targets for install binary and data files, clean up makefile

* Use environment variables for directory locations during install process

* On Linux, read locations from environment variables at compile time

* Split static_directory in two (web and swagger directories)

* Follow recommendations from the Make manual

* Avoid redundant directories

* Added workflow to validate installer setup

* Added CLI options to locate log file, pid file and cache directory

* Fixed an issue where build command did not support the xdg/system switch

* Renamed log option to log-level

* Fixed an issue where xdg install would do a system build

* Use re-usable action to make linux release

* Avoid nested actions (see https://github.com/actions/runner/issues/646)

* Updated installation instructions

* Replaced deprecated use of set-env
2020-11-26 15:57:08 -08:00
Antoine Gersant
0927f3815e Autoformat 2020-11-25 18:03:02 -08:00
Antoine Gersant
bcebaf499e Test cleanup 2020-11-25 18:02:57 -08:00
Antoine Gersant
e0d1f396a8 Removed support for prefix_url 2020-11-25 17:49:18 -08:00
Antoine Gersant
1c5a536277 Merge branch 'master' of https://github.com/agersant/polaris 2020-11-25 16:54:51 -08:00
Tobias Schmitz
bff49c22ec
Embedded artwork support (#101)
* Embedded artwork support for mp4 and id3 tags

* Embedded artwork support for flac tags.

* small fixes

* use first embedded artwork for directory

* added artwork tests

* updated Cargo.lock

* use first embedded artwork for missing artworks
2020-11-25 15:46:09 -08:00
Tobias Schmitz
4534a84c05
update mp4ameta dependency (#100) 2020-11-21 16:04:05 -08:00
Antoine Gersant
d78011e6bc Cleaned settings tests 2020-09-24 22:39:51 -07:00
Antoine Gersant
23a144761e Deserialize preferences 2020-09-24 22:16:24 -07:00
Antoine Gersant
7e46c6cd5a Added preferences API test 2020-09-24 22:14:40 -07:00
Antoine Gersant
42522ffc78 Codecov badge 2020-09-24 02:57:03 -07:00
Antoine Gersant
209813f25c Codecov settings 2020-09-24 01:51:29 -07:00
Antoine Gersant
a7ef7b2bd0 Added support for APE files 2020-09-24 01:46:29 -07:00
Antoine Gersant
cf67e44d20 Added API test for search without query 2020-09-24 00:07:55 -07:00
Antoine Gersant
ca8f046142 Thumbnails code cleanup 2020-09-23 22:20:27 -07:00
Antoine Gersant
8c32d7351c Updated mp4ameta dependency 2020-09-21 19:28:17 -07:00
Antoine Gersant
341a03574b added test coverage workflow 2020-09-21 02:59:45 -07:00
Antoine Gersant
d9bdea8745 Updated Linux release script to include new location of test inputs 2020-09-19 17:12:26 -07:00
Antoine Gersant
ef8246ecfb Cleanup 2020-09-19 17:10:00 -07:00
Antoine Gersant
cd63564c03 Separate test inputs from outputs 2020-09-19 16:58:19 -07:00
Antoine Gersant
00a1ca18cf
Fixed typos 2020-09-11 23:52:15 -07:00
Antoine Gersant
b7415e6304
Cosmetic change 2020-08-27 02:30:18 -07:00
Antoine Gersant
d8f38e88f0 Fixed dead link 2020-08-27 02:01:19 -07:00
Antoine Gersant
dcfd01c7a3 Sidestep github pages 2020-08-27 02:00:22 -07:00
Antoine Gersant
c91f5815d6 Restored headers 2020-08-27 01:56:06 -07:00
Antoine Gersant
1fa8bbc0ca Merge branch 'master' of https://github.com/agersant/polaris 2020-08-27 01:53:42 -07:00
Antoine Gersant
7901cb43bf Removed redundant headings 2020-08-27 01:53:30 -07:00
Antoine Gersant
04757fc20d
Theme swap 2020-08-27 01:50:09 -07:00
Antoine Gersant
cf6f30345c Fixed github pages links 2020-08-27 01:42:52 -07:00
Antoine Gersant
8e55d31eb0
Cleaned up README (#93)
* Cleaned up README

* Removed setup screenshot
2020-08-27 01:39:50 -07:00
Antoine Gersant
e65d57e24a Bundle test files in Linux release tarball 2020-08-15 19:30:41 -07:00
Antoine Gersant
66bef4d006
Clarity tweaks 2020-08-10 20:48:02 -07:00
Antoine Gersant
7cbf27fce0
Merge pull request #88 from zaethan/opus_integration
add support for opus files
2020-08-07 15:24:01 -07:00
Yannik Böttcher
8de736e563 properly export and use the match macro 2020-08-07 15:03:36 +02:00
Yannik Böttcher
943174bafa move match macro into utils module 2020-08-07 14:41:07 +02:00
Yannik Böttcher
1ff845d48e make case insensitivity mandatory for the tests to pass 2020-08-07 12:54:07 +02:00
Yannik Böttcher
9ee9786c0a include opus sample in test suite 2020-08-07 12:51:32 +02:00
Yannik Böttcher
6c4b4d3e20 order dependencies alphabetically 2020-08-07 12:45:43 +02:00
Yannik Böttcher
b2152cecc3 use match_ignore_case macro 2020-08-07 12:44:36 +02:00
Yannik Böttcher
86a935fd79 update all instances of key.to_str 2020-08-06 21:09:41 +02:00
zaethan
70c7463f6f
Update src/index/metadata.rs
Co-authored-by: Laurențiu Nicola <lnicola@users.noreply.github.com>
2020-08-06 19:07:49 +00:00
zaethan
14a6466c2d
Update src/index/metadata.rs
Co-authored-by: Laurențiu Nicola <lnicola@users.noreply.github.com>
2020-08-06 19:07:21 +00:00
Yannik Böttcher
db97dbea46 use eq_ignore_ascii_case instead of to_uppercase 2020-08-06 19:32:52 +02:00
Yannik Böttcher
60e8f3ec46 remove redundant to_str 2020-08-06 18:17:55 +02:00
Yannik Böttcher
fee96e6b49 add support for opus files 2020-08-06 17:26:50 +02:00
Antoine Gersant
fe5ab5ba48 Tagging cleanup 2020-08-04 23:32:36 -07:00
Antoine Gersant
7db8894d65 Add CLI executable to 'complete' feature 2020-08-04 23:29:31 -07:00
Antoine Gersant
797e2e9526 Merge branch 'master' of https://github.com/agersant/polaris 2020-08-04 23:13:17 -07:00
Antoine Gersant
a2dc0ce37a Bumped version number 2020-08-04 23:13:06 -07:00
Antoine Gersant
b423b76e29 Added CLI executable to windows installer 2020-08-04 23:10:59 -07:00
Antoine Gersant
b0ca61ae2f Formatting 2020-08-04 23:05:09 -07:00
Antoine Gersant
2b7f098836
Merge pull request #87 from lnicola/bump-deps
Bump deps
2020-07-24 00:35:03 -07:00
Laurențiu Nicola
6472349523 Bump deps 2020-07-24 10:13:40 +03:00
Antoine Gersant
17976dc99f
Merge pull request #86 from lnicola/parallel-index
Fix build and populate index in parallel
2020-07-21 01:46:41 -07:00
Laurențiu Nicola
dbb5f79371 Use more threads when populating index 2020-07-21 06:54:20 +03:00
Laurențiu Nicola
8035856fb6 Bump pear and pear_codegen 2020-07-20 16:30:13 +03:00
Antoine Gersant
d19ccc7da2
Merge pull request #82 from rayrrr/master
Enable M4A format support
2020-06-21 20:09:59 -07:00
Ray
337a4020fe year one-liner for mp4 metadata 2020-06-20 22:06:40 -04:00
Ray
f0fc9e8fba bump mp4meta version for bugfix 2020-06-20 22:05:44 -04:00
Ray
7637037e3d add m4a support via mp4ameta crate 2020-06-16 08:06:40 -04:00
Ray
e9346e29ee Add unit test 2020-06-15 15:02:48 -04:00
Antoine Gersant
107e63caca Tentative fix for CI breakage 2020-06-13 19:24:47 -07:00
Ray
2797d5ed91 alphabetize mp4meta import 2020-06-13 21:18:54 -04:00
Ray
d8b1f0c002 Enable M4A format support
- Use https://github.com/Saecki/rust-mp4ameta for M4A metadata

Resolves https://github.com/agersant/polaris/issues/70
2020-06-13 20:58:43 -04:00
Antoine Gersant
eb2fd23281 Updated lock file 2020-06-01 00:02:23 -07:00
Antoine Gersant
eb0c141ebe Bumped version number 2020-05-31 19:23:06 -07:00
Antoine Gersant
31f9a3ecc5
Merge pull request #80 from agersant/admin
Do not let users remove their own admin rights
2020-05-31 19:05:43 -07:00
Antoine Gersant
6142697b4a Do not let users remove their own admin rights 2020-05-31 18:37:08 -07:00
Antoine Gersant
8ef6757acb
Merge pull request #79 from agersant/flatten-prefixes
Fixed a bug where flatten could return songs from adjacent directories
2020-05-31 15:20:27 -07:00
Antoine Gersant
d5c186579a Fixed a bug where flatten could return songs from adjacent directories 2020-05-31 14:43:29 -07:00
Antoine Gersant
99db3eda13 Updated transitive dependencies 2020-05-30 17:30:02 -07:00
Antoine Gersant
45f4369301 Bumped diesel dependency 2020-05-30 17:25:51 -07:00
Antoine Gersant
c3466ca248 Bumped simplelog dependency 2020-05-30 17:11:32 -07:00
Antoine Gersant
0c5bd28d56 Updated HTTP dependency 2020-05-30 17:07:25 -07:00
Antoine Gersant
21cf831f74 Updated image dependency 2020-05-30 17:05:41 -07:00
Antoine Gersant
7b12f8f294 More trivial dependency bumps 2020-05-30 16:59:59 -07:00
Antoine Gersant
29819b930a Trivial dependency updates 2020-05-30 16:56:20 -07:00
Antoine Gersant
371b0d0333 Updated cookie dependency to latest 2020-05-30 16:49:39 -07:00
Antoine Gersant
5e1aeb10fb Moved cookie to dev dependencies 2020-05-30 16:47:44 -07:00
Antoine Gersant
d7c66c3745 Automatically redirect /swagger to /swagger/ 2020-05-30 16:34:06 -07:00
Antoine Gersant
eb7c833de5 Bumped Rocket dependency to 0.4.5 2020-05-30 16:32:34 -07:00
Antoine Gersant
d005b86fb0 Lock file update 2020-05-30 15:54:38 -07:00
Antoine Gersant
2577015872 Bumped version number 2020-05-30 15:24:32 -07:00
Antoine Gersant
1af2ba49be Bumped version number 2020-04-16 20:49:53 -07:00
Antoine Gersant
d7eb66d529 Updated release guide 2020-04-16 20:49:36 -07:00
Antoine Gersant
1825056f26 Merge branch 'master' of https://github.com/agersant/polaris 2020-04-16 20:48:04 -07:00
Antoine Gersant
b02d4da979 Bumped version number 2020-04-16 19:38:48 -07:00
Antoine Gersant
f79b4615c4
Merge pull request #69 from wezm/support-non-linux-unix
Support UNIX platforms that aren't Linux
2020-02-22 18:38:51 -08:00
Wesley Moore
a05c838c5b
Support UNIX platforms that aren't Linux 2020-02-23 12:05:17 +11:00
Laurențiu Nicola
768ee1122c
Bump deps (#67) 2020-02-06 14:20:24 -08:00
Antoine Gersant
cea8906c3d Merge branch 'master' of https://github.com/agersant/polaris 2020-02-02 15:12:21 -08:00
Antoine Gersant
6e3f439d8a Made thumbnail padding optional 2020-02-02 15:11:43 -08:00
Laurențiu Nicola
7428891bde
Drop function_name dependency (#66) 2020-02-02 00:58:30 -08:00
Antoine Gersant
86d61dd964 Updated test config ignore 2020-02-01 20:38:33 -08:00
Antoine Gersant
b413125a46 Fixed powershell syntax 2020-02-01 19:59:04 -08:00
Antoine Gersant
f9f69cd55c Insert correct polaris version in windows installer 2020-02-01 19:42:20 -08:00
Antoine Gersant
7562bb306c Removed gitmodules 2020-02-01 19:32:29 -08:00
Antoine Gersant
68c7a0f14b Version bump 2020-02-01 19:30:37 -08:00
Antoine Gersant
4e76a11e7a Pull latest polaris web release when making a polaris release 2020-02-01 19:29:52 -08:00
Antoine Gersant
503eed8b62 Removed traces of polaris-web submodule 2020-02-01 19:28:31 -08:00
Antoine Gersant
be268a2004 Removing web submodule 2020-02-01 19:16:29 -08:00
Laurențiu Nicola
72fdad45db
Disable optimizations for some build-time crates (#65) 2020-02-01 15:05:48 -08:00
Antoine Gersant
186e3173cd Formatting 2020-01-31 19:16:55 -08:00
Antoine Gersant
312eb15a2b Use partial information from id3 tags that have encoding errors 2020-01-31 19:16:07 -08:00
Antoine Gersant
fc36bb4cee Fixed rare test fluke 2020-01-22 21:52:17 -08:00
Antoine Gersant
cdb1a5233e Updated dependencies 2020-01-22 21:45:11 -08:00
Antoine Gersant
23fc43cbf7 Update dependencies 2020-01-22 21:27:01 -08:00
Antoine Gersant
b7b7c6e737 Unpin nightly rustc version 2020-01-22 21:17:28 -08:00
Antoine Gersant
78c8ca8aa2 Unpin rustc nightly 2020-01-20 18:58:45 -08:00
Antoine Gersant
0c28f54f01 Updated list of useful options 2020-01-20 18:31:02 -08:00
Antoine Gersant
27d0a9e158 Added maintenance guide 2020-01-20 18:30:54 -08:00
Antoine Gersant
b2ee2fe701
Automate release process (#62)
Added Github Actions workflow to generate release
2020-01-20 18:21:47 -08:00
Antoine Gersant
aec941b97f
Merge pull request #61 from agersant/index
Bump to latest mp3-duration
2020-01-19 21:54:12 -08:00
Antoine Gersant
3a0fda972b Bump to latest mp3-duration 2020-01-19 18:17:49 -08:00
Antoine Gersant
e2c9e64bf7
Merge pull request #60 from agersant/index
More readable profile markers
2020-01-19 14:50:15 -08:00
Antoine Gersant
645d5163f3 More readable profile markers 2020-01-19 14:28:04 -08:00
Antoine Gersant
36260dcdce
Merge pull request #59 from agersant/index
Index optimizations
2020-01-19 01:21:37 -08:00
Antoine Gersant
028633d0e6 Re-factored duplicated code 2020-01-19 01:01:43 -08:00
Antoine Gersant
2b30307488 Added more profiling markers 2020-01-19 00:44:24 -08:00
Antoine Gersant
f71a8320e9 Don't return from populate() while still writing to database 2020-01-18 23:38:18 -08:00
Antoine Gersant
d1bb60a1c7 Moved metadata module under index 2020-01-18 23:21:45 -08:00
Antoine Gersant
f6b9e67d4e Error handling for index insertions 2020-01-18 22:20:59 -08:00
Antoine Gersant
b8b3c80be9 Don't emit errors on critical path 2020-01-18 22:20:17 -08:00
Antoine Gersant
1764f3da4d Moved database insertions to separate threads from the file crawl 2020-01-18 21:57:44 -08:00
Antoine Gersant
18bc9594a4 Local variable rename 2020-01-18 19:41:07 -08:00
Antoine Gersant
b6d985859c Renamed IndexBuilder 2020-01-18 19:40:46 -08:00
Antoine Gersant
25b36be073 More accurate index duration display 2020-01-18 17:41:57 -08:00
Antoine Gersant
f9a6d6b6d4 Parallelize work during the clean step 2020-01-18 17:39:13 -08:00
Antoine Gersant
b97ace68ea
Merge pull request #58 from agersant/index
Index refactor
2020-01-18 17:38:59 -08:00
Antoine Gersant
e53b9f5867 Cleaned index API 2020-01-18 17:07:56 -08:00
Antoine Gersant
b1e4be2f8f Split index into submodules 2020-01-18 15:37:43 -08:00
Antoine Gersant
f12d0809d4
Merge pull request #56 from agersant/split-backends
Decouple rocket usage from the rest of the code, database improvements
2020-01-18 15:20:19 -08:00
Antoine Gersant
2efc0df04e Don't build without a backend 2020-01-18 14:59:19 -08:00
Antoine Gersant
ee71df9d0b Lock CI rustc to working nightly (https://github.com/rust-lang/rust/issues/68264) 2020-01-18 14:48:47 -08:00
Antoine Gersant
0e6be32a8c Removed unused feature 2020-01-18 14:37:35 -08:00
Antoine Gersant
f9ebb432b2 Removed redundant derefs 2020-01-18 14:31:10 -08:00
Antoine Gersant
50421e91d6 Log errors in index self_trigger 2020-01-18 14:25:10 -08:00
Antoine Gersant
e64435efa5 Database sanity settings 2020-01-18 14:19:44 -08:00
Antoine Gersant
95f6c62531 Removed redundant transactions (59f59e3ccd) 2020-01-18 13:23:22 -08:00
Antoine Gersant
a4991a620e Removed outdated TODO 2020-01-17 22:08:36 -08:00
Antoine Gersant
9df21737fa Validate auth cookies 2020-01-17 22:02:17 -08:00
Antoine Gersant
fa178b92be Validate partial content support 2020-01-17 21:51:28 -08:00
Antoine Gersant
5ccc006515 Cleaner TestService API 2020-01-17 21:14:25 -08:00
Antoine Gersant
9f4f6b4337 Added TODO 2020-01-17 02:49:00 -08:00
Antoine Gersant
7f7da0050b First half of serve request 2020-01-17 02:37:33 -08:00
Antoine Gersant
3c45150651 Use standard http responses as test service outputs 2020-01-17 01:44:12 -08:00
Antoine Gersant
ed66200689 Decoupled most tests from rocket 2020-01-17 01:17:23 -08:00
Antoine Gersant
9ed0526075 Removed blanket import 2020-01-16 23:40:10 -08:00
Antoine Gersant
76118756b9 Constant rename 2020-01-16 23:38:46 -08:00
Antoine Gersant
acffa576e2 Removed more actix 2020-01-16 23:38:10 -08:00
Antoine Gersant
0dba7e2e4f Removed actix 2020-01-16 23:37:36 -08:00
Antoine Gersant
6f642c34e2 Non-optional auth secret 2020-01-16 01:58:51 -08:00
Antoine Gersant
f7efeef653 Added doc reference 2020-01-16 01:22:13 -08:00
Antoine Gersant
4194509f45 Use tokio runtime for tests and actix version 2020-01-16 01:03:39 -08:00
Antoine Gersant
289827d6a3 Explicitly start async executor 2020-01-16 00:41:26 -08:00
Antoine Gersant
1c84cde158 Fixed compile error 2020-01-16 00:35:30 -08:00
Antoine Gersant
61c221a2d2 integration test sharing between backends 2020-01-16 00:34:47 -08:00
Antoine Gersant
a83e1af69b automatic db file names 2020-01-15 22:05:41 -08:00
Antoine Gersant
9e48dc408e DB interactions for actix version 2020-01-15 21:58:37 -08:00
Antoine Gersant
052dc88f14 Implemented version endpoint 2020-01-14 22:40:36 -08:00
Antoine Gersant
e248f3b983 Serve web and swagger static files 2020-01-14 01:33:14 -08:00
Antoine Gersant
0a4d05cdc8 Actix hello world 2020-01-13 23:56:46 -08:00
Antoine Gersant
ba901c7873 Optional rocket feature 2020-01-13 23:12:33 -08:00
Antoine Gersant
60f2e330a4 Moved rocket stuff into its own module 2020-01-13 23:04:05 -08:00
Antoine Gersant
fd8a6c64f5 Made one of the two test albums quiet 2020-01-13 00:26:00 -08:00
Antoine Gersant
448198acb6 Prevent deleted users from using the service using their old session 2020-01-12 21:49:22 -08:00
Antoine Gersant
c49fdbab37 Latest polaris-web 2020-01-12 21:15:17 -08:00
Antoine Gersant
18367198a7 Latest polaris-web 2020-01-12 17:16:53 -08:00
Antoine Gersant
eca4f68834 Replaced /api/auth response with cookies 2020-01-12 17:01:30 -08:00
Antoine Gersant
360d864148 Enforce normal logging level 2020-01-12 14:00:50 -08:00
Antoine Gersant
811a35ab4c Latest polaris-web 2020-01-11 17:09:11 -08:00
Antoine Gersant
26596f16bd Latest polaris-web 2020-01-11 02:35:56 -08:00
Antoine Gersant
54a4f9d394 Pulled latest polaris-web 2020-01-11 02:04:43 -08:00
Antoine Gersant
59366c6b03 Added preference fields for web theme 2020-01-11 01:58:22 -08:00
Antoine Gersant
2de5b34a48 Auto-format 2020-01-11 01:56:44 -08:00
Antoine Gersant
0a0a6ce955 Bumped polaris-web 2020-01-07 23:46:22 -08:00
Antoine Gersant
b83f16e6f5 Dont return HTTP errors when LastFM no-ops 2020-01-07 23:28:26 -08:00
Antoine Gersant
d823dce7db Fixed typo 2020-01-04 17:07:35 -08:00
Antoine Gersant
70388095a5 More error fiddling 2020-01-04 17:03:07 -08:00
Antoine Gersant
ddae5cc24f Fixed straggler error 2020-01-04 16:52:36 -08:00
Antoine Gersant
f41f45f600 Simplified error boilerplate 2020-01-04 16:42:28 -08:00
Antoine Gersant
0b0bfac8fb Name cleanup 2020-01-04 09:38:19 -08:00
Antoine Gersant
cbb7e7b97c Removed old diesel workaround 2020-01-04 09:34:09 -08:00
Antoine Gersant
28bb240ae0 Do not wipe users, mount points and ddns config before applying config file 2020-01-04 09:33:36 -08:00
Antoine Gersant
bca8f4ced8 Ignore blank users in config 2020-01-04 09:00:56 -08:00
Antoine Gersant
dbd87704b4 Bumped version number 2020-01-01 08:04:13 -08:00
Antoine Gersant
13a3a3dcdd Latest polaris web 2020-01-01 07:23:45 -08:00
Antoine Gersant
1a768c4c26 cargo update 2019-12-17 14:40:27 -08:00
Antoine Gersant
0d27d357dd Fixed linter warning 2019-12-17 13:30:36 -08:00
Antoine Gersant
77a1e86f84 Fix CI breakage 2019-12-08 16:40:11 -08:00
Antoine Gersant
299aea7d78 Tentative fix for performance issues (https://github.com/SergioBenitez/Rocket/issues/928) 2019-12-08 16:28:52 -08:00
Antoine Gersant
6a2fbed133 Bumped dependencies 2019-11-28 13:54:17 -08:00
Antoine Gersant
74c4a9144b
Merge pull request #49 from lnicola/bump-deps
Bump deps
2019-10-21 23:00:23 -07:00
Laurențiu Nicola
dc903ea29e Bump metaflac to 0.2 2019-10-22 08:31:34 +03:00
Laurențiu Nicola
890ba87fee Mention the libsqlite3-dev requirement in the README 2019-10-22 08:31:34 +03:00
Laurențiu Nicola
44be1966e5 Install libsqlite3-dev on CI 2019-10-22 08:31:34 +03:00
Laurențiu Nicola
cb72ab1ac7 Enable CI on PRs 2019-10-22 08:31:34 +03:00
Laurențiu Nicola
c2115d878b Bump deps 2019-10-22 08:31:34 +03:00
Antoine Gersant
e91bff46b7
Merge pull request #48 from lnicola/sd-notify
Use sd-notify instead of libsystemd
2019-10-01 17:58:42 -07:00
Laurențiu Nicola
ec8742b8eb Use sd-notify instead of libsystemd 2019-10-01 19:22:25 +03:00
Antoine Gersant
064056fc0c Formatting 2019-09-29 00:34:45 -07:00
Antoine Gersant
165ed277b6 Removed travis and appveyor builds 2019-09-29 00:33:23 -07:00
Antoine Gersant
9af3669cf2
Fixed actions URL 2019-09-29 00:09:44 -07:00
Antoine Gersant
fa5233c0e4
Fixed typo 2019-09-29 00:09:24 -07:00
Antoine Gersant
c6b9847e69
Added Github actions badge 2019-09-29 00:08:53 -07:00
Antoine Gersant
5666896275
Renamed workflow 2019-09-29 00:07:56 -07:00
Antoine Gersant
e8fc576052
Don't use third-party action to clone submodules 2019-09-28 23:42:16 -07:00
Antoine Gersant
37edf64e17
Build matrix for OS and features 2019-09-28 23:28:27 -07:00
Antoine Gersant
2632e083f6 Checkout submodules before building 2019-09-28 23:05:35 -07:00
Antoine Gersant
98cd98dd4d Install nightly toolchain 2019-09-28 22:41:56 -07:00
Antoine Gersant
fa0dd54ce6 Fixed yaml syntax 2019-09-28 22:38:12 -07:00
Antoine Gersant
0d7296b024 Github actions hello world 2019-09-28 22:34:03 -07:00
Antoine Gersant
6416943dd0 Updated flamer 2019-09-28 17:02:42 -07:00
Antoine Gersant
22ae33f029 Updated simplelogger 2019-09-28 16:54:37 -07:00
Antoine Gersant
b89ae5a133 Removed unused dependency 2019-09-28 16:42:09 -07:00
Antoine Gersant
8dbaad98f5 Ditched ring dependency, simplified password hashing 2019-09-28 16:38:43 -07:00
Antoine Gersant
fbb5d7d526 Cargo update 2019-09-28 15:39:33 -07:00
Antoine Gersant
6899cf0a81 Pulled latest polaris-web 2019-09-28 14:57:22 -07:00
Antoine Gersant
90fe1629eb Cleaned up static file serving 2019-09-28 00:48:04 -07:00
Antoine Gersant
01af2ee742 Formatting 2019-09-28 00:14:48 -07:00
Antoine Gersant
f2e6a27c0e Wrap content of unix releases in a top-level folder 2019-09-27 22:42:20 -07:00
Antoine Gersant
f070c743cb Pulled latest polaris web 2019-09-27 22:18:40 -07:00
Antoine Gersant
8c5c43e3a9 Pulled latest polaris-web 2019-09-04 20:58:10 -07:00
Antoine Gersant
851d31eef9 Fixed compile errors 2019-09-02 22:17:26 -07:00
Antoine Gersant
d348f15032
Merge pull request #47 from agersant/profile-index
Profile index
2019-09-02 15:19:01 -07:00
Antoine Gersant
9da2c85c14 Use fork of metaflac with buffered reads 2019-09-02 15:15:16 -07:00
Antoine Gersant
45aa6ff0be Ignore flame graph 2019-09-02 15:15:03 -07:00
Antoine Gersant
82ea7983aa Instrument index duration 2019-09-02 14:28:25 -07:00
Antoine Gersant
d2cc868a0a Autoformat 2019-09-02 13:12:48 -07:00
Antoine Gersant
7cf5f7db09 Updated to task system version 2.0 2019-08-31 13:43:46 -07:00
Antoine Gersant
db105966f4 Updated rustfm-scrobble dependency 2019-08-28 22:41:47 -07:00
Antoine Gersant
9d1ad2dc60 Fixed warning when compiling on Windows 2019-08-28 22:41:32 -07:00
Antoine Gersant
d5844d95b9
Merge pull request #41 from lnicola/auth-secret
Provide the secret key to Rocket
2019-08-07 22:20:51 -07:00
Laurențiu Nicola
1bffdf0861 Provide secret key to Rocket 2019-08-08 08:06:29 +03:00
Antoine Gersant
be9b4203f8
Merge pull request #40 from lnicola/sd-notify
Add support for notifying systemd of startup
2019-08-07 20:22:45 -07:00
Laurențiu Nicola
606eae563d Add support for notifying systemd of startup 2019-08-07 14:37:27 +03:00
Antoine Gersant
4d13c96dcc
Merge pull request #38 from lnicola/cleanups
Bump dependencies
2019-08-07 00:21:50 -07:00
Laurențiu Nicola
b1fec94e82 Fix Windows builds 2019-08-07 08:50:24 +03:00
Laurențiu Nicola
c4d7fc62b5 Switch back to app_dirs 1.1 and fix typo 2019-08-07 08:21:28 +03:00
Laurențiu Nicola
6d963c059f Use 2018-style macro imports 2019-08-06 12:51:15 +03:00
Laurențiu Nicola
57ded63cb8 2018 edition idioms 2019-08-06 12:31:30 +03:00
Laurențiu Nicola
b70c8ff622 More dependency updates 2019-08-06 12:25:47 +03:00
Laurențiu Nicola
c94db165d0 cargo update 2019-08-06 10:44:04 +03:00
Antoine Gersant
3d704908aa
Merge pull request #33 from Darksecond/docker-logging-fix
Fix logging in docker.
2019-05-28 18:12:41 -07:00
Tim Peters
b878ac555e Implement suggestions 2019-05-28 19:15:13 +02:00
Tim Peters
0224a5dee3 Fix logging in docker.
Docker by default doesn't have a TTY. The TermLogger doesn't work in that case.
If the TermLogger can't be initiated, try the SimpleLogger.
See https://github.com/Drakulix/simplelog.rs/issues/30 for a similar case.
2019-05-27 21:10:11 +02:00
Antoine Gersant
f3e48b8507
Update README.md 2019-04-09 23:51:47 -07:00
Antoine Gersant
c3a2ecd7fc Fixed syntax error 2019-04-09 23:35:14 -07:00
Antoine Gersant
244d0e9ed5 Fixed unaccurate redirect to index.html 2019-04-09 00:39:11 -07:00
Antoine Gersant
e5ae0a5e0a Merge remote-tracking branch 'origin/master' 2019-04-09 00:28:37 -07:00
Antoine Gersant
d8305ddd46 Clone submodules for CI 2019-04-09 00:28:30 -07:00
Antoine Gersant
21ada701e8 Removed unused import 2019-04-09 00:28:23 -07:00
Antoine Gersant
af3b9ab92c
Update README.md 2019-04-08 22:46:52 -07:00
Antoine Gersant
26a894c0b1 Properly redirect to index.html so relative src properties in html work 2019-04-08 22:44:53 -07:00
Antoine Gersant
c3b75e6058 revert previous change 2019-04-08 19:45:41 -07:00
Antoine Gersant
6e1ea501a2 Merge remote-tracking branch 'origin/master' 2019-04-08 19:40:56 -07:00
Antoine Gersant
eb605ab882 Testing root URL 2019-04-08 19:40:34 -07:00
Antoine Gersant
a3725d1110
Merge pull request #30 from agersant/bundle-swagger
Bundle swagger
2019-04-07 23:42:46 -07:00
Antoine Gersant
bf67ccfda8 Ship swagger files with installers 2019-04-07 23:41:24 -07:00
Antoine Gersant
69c8c93277 Fixed warnings 2019-04-07 23:28:31 -07:00
Antoine Gersant
c8655a2447 Serve swagger files under /swagger 2019-04-07 23:24:15 -07:00
Antoine Gersant
b190385dbd Allow tests to compile 2019-04-07 19:31:36 -07:00
Antoine Gersant
d59b80b2b3
Update README.md 2019-04-07 19:23:02 -07:00
Antoine Gersant
95d4942522 relative paths for swagger dependencies 2019-04-07 19:01:02 -07:00
Antoine Gersant
32a808b962 Ship swagger files and mount them on /swagger 2019-04-07 18:54:53 -07:00
Antoine Gersant
a4578194f0 Bumped version number 2019-04-07 18:53:43 -07:00
Antoine Gersant
f2509cf02f Updated file paths 2019-04-07 18:10:34 -07:00
Antoine Gersant
9d03f95d5f Updated links to swagger in docs 2019-04-07 18:07:14 -07:00
Antoine Gersant
fce30f9c58 Renamed docs\api to docs\swagger 2019-04-07 18:06:44 -07:00
Antoine Gersant
c9683401f2 Added server prefix 2019-04-07 18:06:07 -07:00
Antoine Gersant
44dd550e46 Added link to documentation 2019-04-07 17:52:38 -07:00
Antoine Gersant
2d9ecb18b4 Formatting 2019-04-07 17:50:31 -07:00
Antoine Gersant
0fd436db2a Formatting 2019-04-07 17:49:28 -07:00
Antoine Gersant
bc39111a4b Updated page title 2019-04-07 17:48:07 -07:00
Antoine Gersant
1f1bf80176 Added title and logo to docs page 2019-04-07 17:47:27 -07:00
Antoine Gersant
cec69f5834 Set theme jekyll-theme-minimal 2019-04-07 17:43:23 -07:00
Antoine Gersant
c12f4f35e3 Changed documentation landing page format 2019-04-07 17:40:48 -07:00
Antoine Gersant
e5436bcb7b Added documentation index 2019-04-07 17:38:31 -07:00
Antoine Gersant
6bcac3338f
Merge pull request #29 from agersant/docs
Added API docs
2019-04-07 17:34:19 -07:00
Antoine Gersant
4f5e58dce7 Documented last.fm endpoints 2019-04-07 17:32:55 -07:00
Antoine Gersant
96a6504aa3 Fixed linked to API spec 2019-04-07 17:07:12 -07:00
Antoine Gersant
caf275330e Added playlist endpoints 2019-04-07 17:04:33 -07:00
Antoine Gersant
56ded64da6 Added /serve endpoint 2019-04-07 16:50:58 -07:00
Antoine Gersant
e4ddcfe831 Added search endpoint 2019-04-07 16:45:02 -07:00
Antoine Gersant
f94276513b Removed swagger example data 2019-04-07 16:27:59 -07:00
Antoine Gersant
d7d53fb264 Documented flatten, random and recent endpoints 2019-04-07 16:25:49 -07:00
Antoine Gersant
35bb77e7a3 Examples in schema objects 2019-04-07 16:22:40 -07:00
Antoine Gersant
357d7b27e8 Added preferences and browse endpoints 2019-04-07 16:08:19 -07:00
Antoine Gersant
50cc373b69 Documented auth endpoint 2019-04-07 14:46:01 -07:00
Antoine Gersant
2be0fc90c0 Documented a few endpoints 2019-04-06 18:59:53 -07:00
Antoine Gersant
0c7b982ac8 Moved API docs to a subdirectory 2019-04-06 18:14:55 -07:00
Antoine Gersant
7cfeee698b Merge branch 'master' into docs 2019-04-06 18:13:20 -07:00
Antoine Gersant
5ab7c38aa4 Pulled latest polaris web 2019-04-06 18:08:36 -07:00
Antoine Gersant
df5651b5fd Pulled latest polaris-web 2019-04-06 17:46:37 -07:00
Antoine Gersant
f1048d1ee2 Added placeholder API docs 2019-03-03 22:13:38 -08:00
Antoine Gersant
4763d13484 Updated install instructions to use nightly channel of rust 2019-02-28 20:28:42 -08:00
Antoine Gersant
6d41f3c90d Updated to latest polaris web 2019-02-27 22:17:48 -08:00
Antoine Gersant
94602317ad Merge branch 'rocket' 2019-02-27 22:13:10 -08:00
Antoine Gersant
55952a7d28 Fixed partial content responses 2019-02-19 23:47:31 -08:00
Antoine Gersant
b24e6e077d Fixed expected HTTP response 2019-02-19 23:45:31 -08:00
Antoine Gersant
7aabd6b15e Added tests for serve endpoints 2019-02-19 23:42:55 -08:00
Antoine Gersant
727f830988 Added logging around HTTP range handling 2019-02-19 21:07:25 -08:00
Antoine Gersant
f77be4a1e1 Removed unused errors 2019-02-19 19:29:35 -08:00
Antoine Gersant
e0d3b3034a Fixed warning outside of building tests 2019-02-19 18:02:22 -08:00
Antoine Gersant
3009636f88 Updated dependencies 2019-02-19 18:02:09 -08:00
Antoine Gersant
58482bf512 Remove lastfm test placeholder 2018-11-12 22:32:49 -08:00
Antoine Gersant
b781071b4e Got rid of raw json strings in unit tests 2018-11-12 22:31:43 -08:00
Antoine Gersant
64e86f5079 Formatting 2018-11-12 22:10:48 -08:00
Antoine Gersant
0185634071 Added unit tests for playlists API 2018-11-12 22:10:32 -08:00
Antoine Gersant
43538853a4 Added unit test for api/flatten 2018-11-12 21:53:40 -08:00
Antoine Gersant
79992b7120 Added unit test for api/browse 2018-11-12 21:52:04 -08:00
Antoine Gersant
cc17c6db97 Added unit test for /api/search 2018-11-12 20:20:56 -08:00
Antoine Gersant
3f5a84ba8a Added test for api/trigger_index 2018-11-11 19:27:30 -08:00
Antoine Gersant
552e4fda9d Added unit test for api/recent 2018-11-11 19:20:56 -08:00
Antoine Gersant
8a4c327fa8 Added unit test for api/random 2018-11-11 19:20:06 -08:00
Antoine Gersant
be1479a40c Test that settings endpoint requires auth 2018-11-11 19:12:16 -08:00
Antoine Gersant
f5d46a0aa3 Added unit test for api/auth 2018-11-11 19:10:51 -08:00
Antoine Gersant
c18134058b Added unit test for api/settings 2018-11-11 18:51:13 -08:00
Antoine Gersant
264c38b7fd API tests skeleton 2018-11-11 18:11:05 -08:00
Antoine Gersant
132a8c3dbf Added unit test for api/initial_setup 2018-11-11 18:03:14 -08:00
Antoine Gersant
5f91da915e Added unit test for api/version 2018-11-11 17:49:38 -08:00
Antoine Gersant
a3968e9cb7 Moved server initialization outside of main for easier testing 2018-11-11 12:11:18 -08:00
Antoine Gersant
ed81d24b7b Formatting 2018-11-11 12:10:09 -08:00
Antoine Gersant
c1c70d4fbf Updated dependencies 2018-11-11 11:33:18 -08:00
Antoine Gersant
4e68293450 Simplified error syntax 2018-11-11 11:19:28 -08:00
Antoine Gersant
2cbc1645ee Pulled latest polaris-web 2018-11-11 11:16:06 -08:00
Antoine Gersant
9dbf8526dc Renamed rocket_api to api 2018-11-11 11:11:24 -08:00
Antoine Gersant
c6d5f7b7ee Fixed a bug where auth cookie was interfering with polaris-web cookie 2018-11-11 00:17:38 -08:00
Antoine Gersant
0cd82a338d Merge branch 'master' into rocket 2018-11-10 23:44:20 -08:00
Antoine Gersant
72c40d5fcd Removed unused errors 2018-11-10 23:37:17 -08:00
Antoine Gersant
e0b3ea4b98 Fixed a bug where credentials were not being validated 2018-11-10 23:34:31 -08:00
Antoine Gersant
f0b360e3d0 Use rocket branch of polaris-web 2018-11-10 19:39:34 -08:00
Antoine Gersant
37a4521e15 Updated rustfm-scrobble to version with recent reqwest dep and nicer API 2018-11-03 17:50:42 -07:00
Antoine Gersant
03a248a020 Undo edition changes, doest not compile on stable 2018-11-02 19:49:33 -07:00
Antoine Gersant
4b4ab8145c Rust 2018 2018-10-30 23:49:30 -07:00
Antoine Gersant
fdf40f2683 Removed outdated test 2018-10-30 23:47:29 -07:00
Antoine Gersant
59b0dda760 Switch to Rust 2018 2018-10-30 23:37:32 -07:00
Antoine Gersant
cba28e8e2c Run server on expected port (5050 or custom) 2018-10-28 19:09:32 -07:00
Antoine Gersant
ed2ae20951 Allow auth via HTTP authorization header 2018-10-28 19:04:21 -07:00
Antoine Gersant
7e11b651ed Fixed a bug where DB wasn't accessed with the correct type 2018-10-28 17:51:46 -07:00
Antoine Gersant
af17c821df Bumped version number 2018-10-28 17:43:54 -07:00
Antoine Gersant
75449aa0e0 Dont run server on main thread 2018-10-28 17:42:08 -07:00
Antoine Gersant
0f2556516a Removed more unused dependencies 2018-10-28 17:33:41 -07:00
Antoine Gersant
a8a98fdd22 Removed iron 2018-10-28 17:27:47 -07:00
Antoine Gersant
5786c99b3a Fixed test compilation error 2018-10-28 17:07:11 -07:00
Antoine Gersant
c25dc8155f Implemented last fm endpoints 2018-10-28 17:05:14 -07:00
Antoine Gersant
4af2c0f09e Formatting 2018-10-28 17:05:04 -07:00
Antoine Gersant
9f84b352b9 Formatting 2018-10-28 17:03:25 -07:00
Antoine Gersant
8354eeff1d Implemented playlist endpoints 2018-10-28 16:13:00 -07:00
Antoine Gersant
2ee33e7615 Added preferences endpoint 2018-10-28 15:52:09 -07:00
Antoine Gersant
ea299312d0 Fixed a bug where authentication yielded incorrect usernames 2018-10-28 15:46:35 -07:00
Antoine Gersant
35514182aa Bumped version for breaking changes on string encoding and json input 2018-10-28 15:14:51 -07:00
Antoine Gersant
84507e4d86 Fixed a bug where names with square brackets tripped routing 2018-10-28 15:13:34 -07:00
Antoine Gersant
5d0ead96e4 Removed TODO 2018-10-28 14:03:28 -07:00
Antoine Gersant
777cca245e Added support for range header when serving files 2018-10-28 14:00:25 -07:00
Antoine Gersant
89e72d00ae Partial implementation of the serve endpoint 2018-10-28 12:21:46 -07:00
Antoine Gersant
cabc72116a Added search endpoints 2018-10-28 11:03:21 -07:00
Antoine Gersant
91919a6628 Added endpoints for recent and random albums 2018-10-28 10:56:04 -07:00
Antoine Gersant
e7a5fcf01b Autoformat 2018-10-28 10:53:44 -07:00
Antoine Gersant
5f28c44506 Added endpoints for browse and flatten 2018-10-28 10:53:11 -07:00
Antoine Gersant
bc9a3a461a Added auth endpoint 2018-10-28 10:33:54 -07:00
Antoine Gersant
ed949b9678 Require admin rights to trigger reindex 2018-10-28 10:22:28 -07:00
Antoine Gersant
36e6016e67 Added endpoint for trigger reindex 2018-10-28 10:19:07 -07:00
Antoine Gersant
00968f0a4f Only expect nightly to build on CI 2018-10-27 17:27:47 -07:00
Antoine Gersant
769c12833a Implemented endpoint to write settings 2018-10-27 17:26:19 -07:00
Antoine Gersant
0ebcc8a280 Autoformat 2018-10-27 17:00:49 -07:00
Antoine Gersant
e8d1baa652 Implemented endpoints to load welcome page 2018-10-27 16:59:05 -07:00
Antoine Gersant
06c694ab4a Moved rocket API to a separate file 2018-10-27 15:24:52 -07:00
Antoine Gersant
33ae1c07b2 Ported version endpoint to rocket 2018-10-27 15:19:31 -07:00
Antoine Gersant
43894d71f7 Merge branch 'master' into rocket 2018-10-27 15:05:07 -07:00
Antoine Gersant
5de59d8687 Muted diesel warning 2018-10-27 15:02:15 -07:00
Antoine Gersant
64afc8b6f5 Mount static files 2018-10-27 14:33:28 -07:00
Antoine Gersant
b528b77f77 Rocket Hello World 2018-10-27 14:03:56 -07:00
Antoine Gersant
054ff10329 Updated dependencies 2018-10-27 10:56:04 -07:00
Antoine Gersant
ccaeaca9d7 Formatting 2018-10-27 10:43:35 -07:00
Antoine Gersant
b72517995d Clippy suggestions 2018-10-27 10:43:18 -07:00
Antoine Gersant
ee432a106d Added migrations folder to Linux release tarballs 2018-10-26 20:01:41 -07:00
Antoine Gersant
ffc30afbd8 Pulled latest polaris web 2018-10-24 15:59:59 -07:00
Antoine Gersant
495d6a0b79 Fixed a bug where exe and static files were in the wrong directory 2018-10-24 15:58:34 -07:00
Antoine Gersant
94d39be08b Fixed init error on Windows 2018-10-24 15:58:14 -07:00
Antoine Gersant
7814984e02 Log static web files location 2018-10-24 15:26:01 -07:00
Antoine Gersant
1f9bb320df Bumped version number 2018-10-24 14:45:14 -07:00
Antoine Gersant
d894ae2783 Cleanup 2018-10-08 21:28:05 -07:00
Antoine Gersant
120ee72e1d Save thumbnails as jpeg 2018-10-07 18:52:58 -07:00
Antoine Gersant
f905bc4f73 Added API endpoint to unlink last.fm account 2018-10-07 18:06:29 -07:00
Antoine Gersant
a84f13214d Pulled latest polaris-web 2018-10-07 14:00:59 -07:00
Antoine Gersant
3fba584671 Last FM auth API now returns HTML content supplied by caller 2018-10-07 13:55:49 -07:00
Antoine Gersant
cd40ce374e Fixed build breakage on Linux 2018-10-06 21:50:51 -07:00
Antoine Gersant
e1bd70e71a Updated dependencies 2018-10-06 21:32:55 -07:00
Antoine Gersant
0d14530c8b Updated dependencies 2018-10-06 21:30:29 -07:00
Antoine Gersant
4aa2386a0b Updated dependency 2018-10-06 18:15:56 -07:00
Antoine Gersant
fea0b4ef25 Updated diesel dependency 2018-10-06 18:13:44 -07:00
Antoine Gersant
4048ff818c Updated dependency 2018-10-06 17:30:01 -07:00
Antoine Gersant
0d05db9b46 Updated dependencies 2018-10-06 17:24:06 -07:00
Antoine Gersant
b71a1f6768 Updated dependencies 2018-10-06 17:03:06 -07:00
Antoine Gersant
15a10f0ba8 Rustfmt 2018-10-06 16:30:21 -07:00
Antoine Gersant
ec69a3568f Fixed clippy lints 2018-10-06 16:29:46 -07:00
Antoine Gersant
0297b351bf Rustfmt 2018-10-06 15:46:30 -07:00
Antoine Gersant
42140d3137 Merge branch 'master' of https://github.com/agersant/polaris 2018-10-06 14:46:16 -07:00
Antoine Gersant
dde1403dbc Pulled latest polaris web 2018-10-06 14:46:10 -07:00
Antoine Gersant
2092813258 Changed LastFM auth flow from application flow to web-flow 2018-10-06 14:41:25 -07:00
Antoine Gersant
c073e827c7
Added link to polaris-android 2018-04-06 01:15:37 -07:00
Antoine Gersant
5565469b87
Fixed title hierarchy 2018-04-06 01:12:50 -07:00
Antoine Gersant
9725efe331
Added link to docker-polaris 2018-04-06 01:11:45 -07:00
Antoine Gersant
c52ec3d30c Added support for lastfm scrobbling
- Added user preferences
- Added time and location to log entries
2018-03-06 21:36:10 -08:00
Antoine Gersant
42d1bfb882 Autoformat 2018-01-06 14:37:26 -08:00
Antoine Gersant
c8810c8683 Less verbose module references 2018-01-06 14:35:55 -08:00
Antoine Gersant
da01a3f8c2 Updated winapi dependency 2018-01-06 14:31:48 -08:00
Antoine Gersant
718151e3cb Pulled latest polaris-web 2018-01-06 09:21:45 -08:00
Antoine Gersant
c999977a03 Merge remote-tracking branch 'origin/master' 2017-12-12 21:24:17 -08:00
Antoine Gersant
063726f92f Autoformat 2017-12-12 21:23:39 -08:00
Antoine Gersant
a9bdb7bb2d Updated deprecated diesel construct 2017-12-12 21:23:29 -08:00
Antoine Gersant
e6769aa8f6
Merge pull request #20 from jxs/master
update rust-ape dependency
2017-12-12 20:01:45 -08:00
João Oliveira
02ba4e34a2 update rust-ape dependency 2017-12-12 00:27:32 +00:00
Antoine Gersant
d310c1fab0 Updated diesel dependency 2017-12-10 20:41:52 -08:00
Antoine Gersant
abc401ce06 Fixed a bug where mp3 file durations were not read 2017-12-10 19:41:12 -08:00
Antoine Gersant
a3defb8700 Merge remote-tracking branch 'origin/master' 2017-12-10 19:05:59 -08:00
Antoine Gersant
52835c4b97 Index now measures duration of mp3 files 2017-12-10 19:05:19 -08:00
Antoine Gersant
7571d13e16
Merge pull request #19 from jxs/update-dependencies
update id3 and metaflac dependencies, closes #16
2017-12-09 19:34:40 -08:00
João Oliveira
b28eaf72c8 update id3 and metaflac dependencies, closes #16 2017-12-10 02:30:51 +00:00
Antoine Gersant
2bd1b8220d Fixed startup error when terminal isn't available 2017-12-09 16:21:57 -08:00
Antoine Gersant
fc9049fea5 Merge remote-tracking branch 'origin/master' 2017-12-09 00:39:13 -08:00
Antoine Gersant
8e97e7346d Write PID file when forking (unix only) 2017-12-09 00:35:23 -08:00
Antoine Gersant
a35a9907ae
Merge pull request #17 from jxs/master
update ring to version 0.11.0
2017-12-07 15:49:15 -08:00
João Oliveira
f49a0a9503 update ring to version 0.11.0 2017-12-07 14:16:49 +00:00
Antoine Gersant
d09e0d96fe
Merge pull request #15 from jxs/master
Calculate flac duration
2017-12-06 21:06:43 -08:00
João Oliveira
4e52572638 update cargo lock 2017-12-04 03:02:38 +00:00
João Oliveira
e0641e5253 add song duration for flac files 2017-12-03 21:35:00 +00:00
João Oliveira
418c8e52ab add duration to song table and default it's value to None for all
formats on song tags
2017-12-03 13:24:04 +00:00
Antoine Gersant
d06263cff7 Fixed a bug where search terms with spaces didn't work 2017-11-03 20:09:42 -07:00
Antoine Gersant
962ba5efc7 Merge branch 'search' 2017-11-03 13:50:24 -07:00
Antoine Gersant
62688a8402 Implemented search 2017-11-03 13:42:42 -07:00
Antoine Gersant
24c6fcb4f4 Formatting 2017-11-03 12:29:10 -07:00
Antoine Gersant
e079574879 Fixed diesel warnings 2017-11-03 12:28:41 -07:00
Antoine Gersant
65f3e268dc Bump to latest diesel, stub search function 2017-11-03 12:14:53 -07:00
Antoine Gersant
96f8480b2d Merge pull request #12 from jxs/master
update project to use log crate and replace println! calls with equivalent log level macro calls
2017-10-14 12:23:44 -07:00
João Oliveira
55d6e412d6 update project to use log crate and replace println! calls with equivalent log level macro calls 2017-10-14 04:34:54 +01:00
Antoine Gersant
b24f3f1d10 Merge pull request #13 from jxs/bugfix/api-http-auth
fix bug on api.rs, only insert username in the session if authentication was successful
2017-10-13 18:53:00 -07:00
João Oliveira
130aa70c2b fix bug on api.rs, only insert username in the session if authentication was successful 2017-10-13 18:59:42 +01:00
Antoine Gersant
953dea1929 Bumped version number to 0.7.1 2017-10-08 19:04:44 -07:00
Antoine Gersant
80c37ce1bb Autoformat 2017-10-08 15:26:45 -07:00
Antoine Gersant
8f83d92a42 Merge remote-tracking branch 'origin/master' 2017-10-08 15:26:10 -07:00
Antoine Gersant
f4977f523d Improved performance of thumbnail generation 2017-10-08 15:25:46 -07:00
Antoine Gersant
e5b0ee02d7 Merge pull request #11 from jxs/master
add prefix_url to config options to allow polaris to run behind a reverse proxy
2017-10-07 19:58:39 -07:00
João Oliveira
a82af0f0b8 add prefix_url to config options to allow polaris to run behind a reverse proxy 2017-10-03 00:36:21 +01:00
Antoine Gersant
a3f7223722 Added contributing.md 2017-09-28 23:16:26 -07:00
Antoine Gersant
fec28ceb80 Removed unused import 2017-09-27 00:26:36 -07:00
185 changed files with 16262 additions and 5110 deletions

9
.codecov.yml Normal file
View file

@ -0,0 +1,9 @@
coverage:
range: "0...100"
status:
patch:
default:
informational: true
project:
default:
informational: true

1
.envrc Normal file
View file

@ -0,0 +1 @@
use flake

View file

@ -0,0 +1,28 @@
name: 'Make a Linux Release'
description: 'Creates archive containing files to install Polaris on a Linux system'
inputs:
version-number:
description: 'Polaris version number'
required: true
default: '0.0'
output-file:
description: 'File path where the resulting archive should be stored'
required: false
default: 'polaris.tar.gz'
runs:
using: "composite"
steps:
- name: Download Polaris Web
run: |
curl -L -o web.zip https://github.com/agersant/polaris-web/releases/latest/download/web.zip
unzip web.zip
shell: bash
- name: Set Polaris version
run: echo "POLARIS_VERSION=${{ inputs.version-number }}" >> $GITHUB_ENV
shell: bash
- name: Build archive
run: res/unix/release_script.sh
shell: bash
- name: Copy archive to output location
run: cp release/polaris.tar.gz ${{ inputs.output-file }}
shell: bash

View file

@ -0,0 +1,28 @@
name: 'Make a Windows Release'
description: 'Creates archive containing files to install Polaris on a Windows system'
inputs:
version-number:
description: 'Polaris version number'
required: true
default: '0.0'
output-file:
description: 'File path where the resulting installer should be stored'
required: false
default: 'polaris.msi'
runs:
using: "composite"
steps:
- name: Download Polaris Web
run: |
curl -L -o web.zip https://github.com/agersant/polaris-web/releases/latest/download/web.zip
unzip web.zip
shell: bash
- name: Set Polaris Version
run: echo "POLARIS_VERSION=${{ inputs.version-number }}" >> $GITHUB_ENV
shell: bash
- name: Build Installer
run: res/windows/release_script
shell: pwsh
- name: Copy installer to output location
run: cp release/polaris.msi ${{ inputs.output-file }}
shell: bash

25
.github/workflows/build.yml vendored Normal file
View file

@ -0,0 +1,25 @@
name: Build
on:
pull_request:
push:
jobs:
test:
name: Run Tests
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
features: ["", --features ui]
steps:
- name: Install libsqlite3-dev
if: contains(matrix.os, 'ubuntu') && !contains(matrix.features, 'bundle-sqlite')
run: sudo apt-get update && sudo apt-get install libsqlite3-dev
- uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
- uses: actions-rs/cargo@v1
with:
command: test
args: --release ${{ matrix.features }}

48
.github/workflows/coverage.yml vendored Normal file
View file

@ -0,0 +1,48 @@
name: Test Coverage
on:
pull_request:
branches:
- master
push:
branches:
- master
jobs:
test:
name: Measure Test Coverage
runs-on: ubuntu-latest
steps:
- name: Checkout Polaris
uses: actions/checkout@v4
- uses: actions-rust-lang/setup-rust-toolchain@v1
with:
components: llvm-tools-preview
- name: Install grcov
run: cargo install grcov
- name: Run tests
run: cargo test --no-fail-fast
env:
RUSTFLAGS: "-Cinstrument-coverage"
- name: Gather coverage results
run: >
grcov
.
-t lcov
-o coverage.txt
--llvm
--branch
--ignore-not-existing
--binary-path ./target/debug/
--excl-line "#\[derive\("
--excl-br-line "#\[derive\("
--excl-start "mod tests \{"
--excl-br-start "mod tests \{"
- name: Upload Results
uses: codecov/codecov-action@v2
with:
fail_ci_if_error: true
verbose: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

19
.github/workflows/deploy-demo.yml vendored Normal file
View file

@ -0,0 +1,19 @@
name: Deploy Demo Server
on:
workflow_dispatch:
release:
types: [released]
jobs:
trigger:
name: Trigger Demo Build
runs-on: ubuntu-latest
steps:
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.POLARIS_DEMO_ACCESS_TOKEN }}
repository: agersant/polaris-demo
event-type: polaris-release

108
.github/workflows/release.yml vendored Normal file
View file

@ -0,0 +1,108 @@
on:
workflow_dispatch:
inputs:
versionNumber:
description: "User-facing version number (eg: 0.13.0)"
required: true
name: Make Release
jobs:
branch_and_tag:
name: Update Release Branch
runs-on: ubuntu-latest
steps:
- name: Merge to Release Branch
uses: devmasx/merge-branch@v1.3.1
with:
type: now
target_branch: release
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Checkout Release Branch
uses: actions/checkout@v4
with:
ref: release
- name: Update Polaris Version in Cargo.toml
run: gawk -i inplace '/^version/ { if (count == 0) { $3 = "\"${{ github.event.inputs.versionNumber }}\""; count++ } } 1' Cargo.toml
- name: Commit Cargo.toml Version Change
uses: EndBug/add-and-commit@v9
with:
message: "Updated version number"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Add <version number> Git Tag
run: |
git config --global user.name ${{ github.actor }}
git config --global user.email "<>"
git tag -f -a ${{ github.event.inputs.versionNumber }} -m "Version number"
git push -f --tags
windows:
name: Windows
runs-on: windows-latest
needs: branch_and_tag
steps:
- name: Checkout Polaris
uses: actions/checkout@v4
with:
ref: release
- name: Install Rust Toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Make release
uses: ./.github/actions/make-windows-release
with:
version-number: ${{ github.event.inputs.versionNumber }}
output-file: Polaris_${{ github.event.inputs.versionNumber }}.msi
- name: Upload installer
uses: actions/upload-artifact@v4
with:
if-no-files-found: error
name: windows-artifact
path: Polaris_${{ github.event.inputs.versionNumber }}.msi
linux:
name: Linux
runs-on: ubuntu-latest
needs: branch_and_tag
steps:
- name: Checkout Polaris
uses: actions/checkout@v4
with:
ref: release
- name: Make release
uses: ./.github/actions/make-linux-release
with:
version-number: ${{ github.event.inputs.versionNumber }}
output-file: Polaris_${{ github.event.inputs.versionNumber }}.tar.gz
- name: Upload release
uses: actions/upload-artifact@v4
with:
if-no-files-found: error
name: linux-artifact
path: Polaris_${{ github.event.inputs.versionNumber }}.tar.gz
create_release:
name: Create Github Release
runs-on: ubuntu-latest
needs: [windows, linux]
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
merge-multiple: true
- name: Make Github release
uses: softprops/action-gh-release@v2
with:
body: 'Release notes are documented in [CHANGELOG.md](https://github.com/agersant/polaris/blob/master/CHANGELOG.md)'
draft: true
prerelease: false
name: Polaris ${{ github.event.inputs.versionNumber }}
tag_name: ${{ github.event.inputs.versionNumber }}
fail_on_unmatched_files: true
files: |
Polaris_${{ github.event.inputs.versionNumber }}.tar.gz
Polaris_${{ github.event.inputs.versionNumber }}.msi

128
.github/workflows/validate-install.yml vendored Normal file
View file

@ -0,0 +1,128 @@
on:
pull_request:
push:
name: Validate Install
jobs:
package_linux_release:
name: Package Linux Release
runs-on: ubuntu-latest
steps:
- name: Checkout Polaris
uses: actions/checkout@v1
- name: Make release
uses: ./.github/actions/make-linux-release
with:
version-number: "0.0.0"
output-file: polaris.tar.gz
- name: Upload packaged release
uses: actions/upload-artifact@v4
with:
if-no-files-found: error
name: linux-release
path: polaris.tar.gz
validate_linux_system_install:
name: Linux System Install
runs-on: ubuntu-latest
needs: package_linux_release
steps:
- name: Download release
uses: actions/download-artifact@v4
with:
name: linux-release
path: .
- name: Extract release
run: tar -xzvf polaris.tar.gz --strip-components=1
- name: Preview Install
run: make preview
- name: Preview Install w/ Custom Prefix
run: make preview PREFIX=/some/random/prefix
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install
run: sudo --preserve-env=PATH make install
- name: Run Polaris
run: sudo /usr/local/bin/polaris && sleep 5s
- name: Make a request
run: curl -f http://localhost:5050
- name: Stop Polaris
run: sudo kill -KILL $(sudo cat /usr/local/var/run/polaris/polaris.pid)
- name: Uninstall
run: sudo make uninstall
validate_linux_xdg_install:
name: Linux XDG Install
runs-on: ubuntu-latest
needs: package_linux_release
steps:
- name: Download release
uses: actions/download-artifact@v4
with:
name: linux-release
path: .
- name: Extract release
run: tar -xzvf polaris.tar.gz --strip-components=1
- name: Preview Install
run: make preview-xdg
- name: Preview Install w/ Custom XDG_DATA_HOME
run: make preview-xdg XDG_DATA_HOME=/my/own/xdg/home
- uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Install
run: make install-xdg
- name: Run Polaris
run: $HOME/.local/bin/polaris && sleep 5s
- name: Make a request
run: curl -f http://localhost:5050
- name: Stop Polaris
run: kill -KILL $(cat /tmp/polaris-1001/polaris.pid)
- name: Uninstall
run: make uninstall-xdg
package_windows_release:
name: Package Windows Release
runs-on: windows-latest
steps:
- name: Checkout Polaris
uses: actions/checkout@v1
- name: Install Rust Toolchain
uses: actions-rust-lang/setup-rust-toolchain@v1
- name: Make release
uses: ./.github/actions/make-windows-release
with:
version-number: "0.0.0"
output-file: polaris.msi
- name: Upload packaged release
uses: actions/upload-artifact@v4
with:
if-no-files-found: error
name: windows-release
path: polaris.msi
validate_windows_install:
name: Windows Install
runs-on: windows-latest
needs: package_windows_release
steps:
- name: Download release
uses: actions/download-artifact@v4
with:
name: windows-release
path: .
- name: Install
run: msiexec /i polaris.msi /qn
- name: Run Polaris
run: |
start $env:LOCALAPPDATA/Permafrost/Polaris/polaris-cli.exe
sleep 5
- name: Make a request
run: curl -f http://localhost:5050
- name: Stop Polaris
run: taskkill /IM polaris-cli.exe
- name: Uninstall
run: msiexec /x polaris.msi /qn

29
.gitignore vendored
View file

@ -1,10 +1,25 @@
# Dev environment
.direnv
# Build output
target
# Test output
test-output
# Local config for quick iteration
TestConfig.toml
# Runtime artifacts
auth.secret
collection.index
polaris.log
polaris.ndb
polaris.pid
profile.json
/peaks
/thumbnails
# Release process artifacts (usually runs on CI)
release
*.res
test/*.sqlite
*.sqlite-journal
*.sqlite-wal
*.sqlite-shm
tmp
TestConfigLinux.toml
TestConfigWindows.toml

3
.gitmodules vendored
View file

@ -1,3 +0,0 @@
[submodule "web"]
path = web
url = https://github.com/agersant/polaris-web.git

View file

@ -1,2 +1 @@
write_mode = "Overwrite"
hard_tabs = true

View file

@ -1,10 +0,0 @@
language: rust
rust:
- stable
- beta
- nightly
matrix:
allow_failures:
- rust: beta
- rust: nightly

10
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,10 @@
{
"files.watcherExclude": {
"**/target/**": true,
"**/test-output/**": true
},
"files.exclude": {
"**/target": true,
"**/test-output": true
}
}

32
.vscode/tasks.json vendored
View file

@ -1,32 +0,0 @@
{
"version": "0.1.0",
"showOutput": "always",
"tasks": [
// Run test environment
{
"taskName": "Run",
"options": { "cwd": "${workspaceRoot}" },
"command": "cargo",
"args": ["run", "--", "-c", "./TestConfigWindows.toml", "-d", "test/db.sqlite", "-w", "../polaris-web"]
},
// Run unit tests
{
"isTestCommand": true,
"taskName": "Test",
"options": { "cwd": "${workspaceRoot}" },
"command": "cargo",
"args": ["test"]
},
// Compile
{
"taskName": "Compile",
"options": { "cwd": "${workspaceRoot}" },
"command": "cargo",
"args": ["check"]
}
]
}

427
CHANGELOG.md Normal file
View file

@ -0,0 +1,427 @@
# Changelog
## Unreleased Changes
- Fixed a typo in the log message that is written after applying configuration changes. (thanks @luzpaz)
## Polaris 0.15.0
### Server
- Added support for browsing the music collection by metadata (by artist, by genre, etc.).
- Added support for multi-value metadata for the following song fields: `artist`, `album artist`, `composer`, `genre`, `label` and `lyricist`.
- Added support for structured search query syntax.
- Added capability to extract audio waveform data.
- Configuration data (user credentials, music directories, etc.) is now stored in a plain-text file which Polaris can read and write to.
- ⚠️ The configuration format is now ([documented](docs/CONFIGURATION.md)) and slightly simpler than in previous versions.
- Persistent data, such as playlists, is now saved in a directory that may be configured with the `--data` CLI option or the `POLARIS_DATA_DIR` environment variable.
- ⚠️ Upon first launch, configuration data and playlists will be migrated from the Polaris 0.14.0 database into their new homes. After successful migration, the old database file will be deleted and the server will finally start. This migration functionality will be removed in future Polaris versions.
- Collection scans are now automatically triggered when configuration changes or files are added/removed.
- ⚠️ Dynamic DNS now works with any provider that supports updates over HTTP without header-based auth. This means YDNS is no longer an option, and you need to input a new URL for DDNS updates.
- ⚠️ Removed last.fm integration due to maintenance concerns (abandoned libraries, broken account linking) and mismatch with project goals.
- Removed periodic collection scans.
### Web client
- Every page has been updated to a new visual style.
- The file browser is now displayed as an interactive tree on a single page.
- The file browser now supports common navigation keyboard shortcuts.
- The file browser now supports jumping to a visible file or folder by typing the start of its name.
- The file browser now omits the top-level directory when only one music folder has been configured.
- The current playlist now has two display modes: compact or with album art.
- Songs in the current playlist can now be selected and re-ordered with the mouse.
- Added a button to display statistics about the current playlist.
- Added new pages to browse the music collection by genre.
- Added new pages to browse the music collection by artist.
- Added a new page to browse the music collection by album.
- The Recently Added Albums and Random Albums pages now distinguish albums by file metadata instead of file path.
- When navigating back to the Random Albums page, the shuffle ordering is now preserved.
- The current playlist now supports common navigation keyboard shortcuts.
- The seekbar for the current song being played has been replaced with a waveform visualization.
- The title of the current song in the player can be clicked to display its metadata
- Improved responsiveness when queuing large amounts of songs at once.
- The `Settings > Collection` page now shows the current status of collection scanning.
- Theme preferences have been reset and are now stored client-side.
- Accent color is now configured as a saturation multiplier and base hue, which are used to generate a full color ramp.
### API
- API version is now 8.0.
- Documentation is now served under `/api-docs` instead of `/swagger` (eg. `http://localhost:5050/api-docs`)
- Clients are now expected to send their preferred API major version in an `Accept-Version` header. Omitting this currently defaults to `7`, but will become an error in future Polaris releases. Support for API version 7 will be removed entirely in a future release.
- Most API responses now support gzip compression.
- The response format of the `/browse`, `/flatten`, `/get_playlist`, `/search/<query>` endpoints has been modified to accommodate large lists.
- Added new endpoints to query albums and artists.
- The `/random` and `/recent` albums are deprecated in favor of `/albums/random` and `/albums/recent`. These endpoints now have optional parameters for RNG seeding and pagination.
- The `/search/<query>` endpoint now requires a non-empty query (`/search/` now returns HTTP status code 404, regardless of API version).
- The `/search/<query>` endpoint now supports per-field queries and boolean combinators.
- The `/thumbnail` endpoint supports a new size labeled `tiny`, which returns 40x40px images.
- Added a new `/get_songs` endpoint which returns song metadata in bulk.
- Added a new `/peaks` endpoint which returns audio signal peaks that can be used to draw waveform visualizations.
- Added a new `/index_status` endpoint which returns the status of music collection scans.
- Removed the `/config` and `/preferences` API endpoints.
- Removed the `/ddns` API endpoints, merged into the existing `/settings` endpoints.
## Polaris 0.14.3
### Server
- Fixed a build error (https://github.com/rust-lang/rust/issues/127343) with recent versions of the Rust compiler (thanks @pbsds)
- Added support for m4b audio files (thanks @duydl)
## Polaris 0.14.2
### Server
- Fixed a startup error in Windows packaged builds
## Polaris 0.14.1
### Server
- Fixed compilation issue when using musl toolchains
- Log messages that DDNS is not setup have been downgraded to debug level
### Web client
- Fixed a bug where non-ASCII files or directories were not always alphabetically sorted (thanks @dechamps)
- Fixed a bug where after linking a last.fm account, clicking the account name would not link to the expected page
## Polaris 0.14.0
### General
- Changes are now documented in `CHANGELOG.md` instead of inside individual Github releases
### Server
- API version is now 7.0
- ⚠️ Removed support for authentication via cookies (deprecated in Polaris 0.13.0)
- ⚠️ Removed support for authentication via the `Basic` scheme when using the HTTP `Authorization` header (deprecated in Polaris 0.13.0)
- Fixed a bug where all music sources would be deleted when trying to add sources with duplicate names
- Additional metadata fields are now indexed: lyricist, composer, genre and label (thanks @pmphfm)
- Endpoints returning thumbnail images or audio files no longer use HTTP `content-encoding`
- When indexing files with ID3v2 tags, the "Original Date Released" frame can now be used to populate the year associated with a song
- The `/thumbnail` endpoint now supports an optional parameter for small/large/native image sizing. (thanks @Saecki)
- Log files now contain more details about the cause of failed HTTP requests (3xx, 4xx, 5xx)
- Startup failures now generate clearer error messages
### Web client
- Volume slider now applies non-linearly
- Artist names are now displayed in the Random Albums and Recent Albums pages
## Polaris 0.13.5
### Server
- Added support for AIFF and WAVE files (thanks @gahag)
### Web Client
- Improved performance when scrolling large playlists
- Fixed display and playback issues when a song was used multiple times in a playlist
- Playlist duration can now display number of days
- Fixed a bug where the playlist panel could have blank space in very tall browser windows
- Major dependencies updates
## Polaris 0.13.4
### Server
Adjustments to logging behavior.
On Linux:
- Running without `-f` emits a log file
- Running with `-f` and no `--log` option does not emit a log file
- Running with `-f` and `--log` option emits a log file
On Windows:
- Running with UI feature (`polaris.exe` in releases) emits a log file
- Running without UI feature (`polaris-cli.exe` in releases) and no --log option does not emit a log file
- Running without UI feature (`polaris-cli.exe` in releases) and --log option emits a log file
## Polaris 0.13.3
### Server
- Fixed a bug where music that is no longer on disk was still considered in the collection, even after re-indexing
- On Windows, Polaris now creates a log file
- On Linux, Polaris now creates a log file, even when running with the -f option
## Polaris 0.13.2
### Web client
- Fixed a bug where it was not possible to view or edit which users have administrator rights
- Fixed a bug where, in some cases, drag and dropping a specific disc from an album would not queue the entire disc
## Polaris 0.13.1
### Server
- Fixed a bug where the Windows installer would create unusable installations. #122
## Polaris 0.13.0
### API changes
- Bumped API version number to 6.0.
- Added new endpoints to manage users, mount points and settings more granularly.
- Added support for authenticating via bearer tokens generated by the /auth endpoint. These tokens can be submitted via Bearer HTTP Authorization headers, or as URL parameters (`?auth_token=…`).
- Authentication using cookies or Basic HTTP Authorization headers is deprecated and will be removed in a future revision.
- Authentication cookies no longer expire after 24 hours. The newly added bearer tokens also have no expiration date.
- Last.fm account linking now requires a short-lived auth token obtained from the newly added `lastfm/link_token` endpoint.
Server
- ⚠Breaking change⚠ If you use a config file, the `reindex_every_n_seconds` and `album_art_pattern` fields must now be in a [settings] section.
- ⚠Breaking change⚠ The installation process on Linux has changed a lot. See the README for updated installation instructions. A summary of the changes is available [here](https://github.com/ogarcia/docker-polaris/issues/2).
- Embedded album art is now supported for mp3, flac and m4a files (thanks @Saecki).
- OPUS files can now be indexed and streamed (thanks @zaethan).
- APE files can now be indexed and streamed.
- The collection indexer has been rewritten for better performance. This also fixed an issue where on some machines, the web client would be unusable while indexing (thanks @inicola for the code reviews).
- Thumbnail generation is now slightly faster, and works with more pixel formats (notably RGBA16).
- Polaris now uses actix-web instead of Rocket. This change fixes numerous performance and stability issues.
- Sqlite is now bundled by default when building Polaris and was removed from the list of prerequisites. This can be controlled with the `bundle-sqlite` feature flag when compiling Polaris.
- The default album art pattern now includes the jpeg extension in addition to jpg.
- Album art patterns are now case insensitive.
Web client
- ⚠Breaking change⚠ Your current playlist will appear broken after this update. Please clear the current playlist using the trash can icon. Saved playlists are not affected.
- Added a logout button.
- Reworked interface for managing user accounts.
- Added a shuffle button to randomly re-order the content of the current playlist.
- The total duration of the current playlist is now displayed.
- Audio output can now be toggled on/off by clicking the volume icon.
- Individual discs from multi-disc albums can now be dragged into the playlist.
- When browsing to an album, songs are now displayed and queued in filepath order.
- Fixed a bug where albums could not be dragged from the random or recent views.
- Fixed a bug where directories with a # sign in their name could not be browsed to.
## Polaris 0.12.0
### Server
- Library indexing speed is now significantly faster
- When indexing files that have malformed ID3 tags, information preceding the error will no longer be discarded
- Deleted users can no longer make requests using an existing session
- When using a config file, existing users, mounts points and DDNS settings are no longer removed before applying the configuration
- When using a config file to create users, blank usernames are now ignored
- Improved architecture and added more unit tests
API Changes
- API version number bumped to 4.0
- The auth endpoint now returns HTTP cookies instead of a JSON response
- Client requests to update Last.fm status no longer return an error if no Last.fm account is associated with the user
- The thumbnail endpoint now supports an option to disable padding to a square image
Web client
- The web client now uses Vue instead of Riot as its UI framework
- Added support for theming
## Polaris 0.11.0
### Server
- Compatible with current versions of the Rust nightly compiler
- Fixed a rare crash when indexing corrupted mp3 files
- On Linux, Polaris now notifies systemd after starting up
- Release tarball for Linux version now includes a top-level directory
- User sessions no longer break across server restarts (more improvements still to do on this: #36)
- ⚠️ Breaking change: due to improvements in Polaris credentials management, you will have to re-create your users and playlists after upgrading to this version. If you want to preserve your playlists, you can use a program like DB Browser for SQLite to back up your playlists (from db.sqlite within your Polaris installation directory) and restore them after you re-create users with the same names.
### Web client
- Song durations are now listed when available
- Fixed a bug where clicking on breadcrumbs did not always work when the Polaris server is hosted on Windows
- Current track info now shows in browser tab title
- Fixed a semi-rare bug where indexing would not start during initial setup flow
- Improved handling of untagged songs
- Fixed a bug where playlist had padding in Chrome
- Fixed a bug where folder icons did not render on some systems
Thank you to @lnicola for working on most of the server changes!
## Polaris 0.10.0
### Server
- Polaris servers now ship with an interactive API documentation, available at http://localhost:5050/swagger
- When using a prefix URL in Polaris config files, a / will no longer be added automatically at the end of the prefix
### Web client
- Automatically bring up player panel when songs are queued
- Fixed a bug where songs were not always correctly sorted by track number in browser panel
- Fixed a bug where some button hitboxes didn't match their visuals
## Polaris 0.9.0
### Server
- Rewrote all endpoints and server setup using Rocket instead of Iron
- Fixed a bug where special characters in URL to collection folders were not handled correctly (bumped API version number)
- Server API is now unit tested
- Fixed a bug where lastFM integration endpoints did not work
- ⚠️ Compiling Polaris now requires the nightly version of the Rust compiler
### Web client
- Encode special characters in URL to collection folders
## Polaris 0.8.0
### Server
- Added new API endpoints for search
- Added new API endpoints for Last.fm integration
- Thumbnails are now stored as .jpg images instead of .png
- Duration of some audio files is now being indexed
- On Linux when running as a forking process, a .pid file will be written
- Fixed a bug where usernames were inserted in session even after failed authentication
### Web client
- Added search panel
- Added settings tab to link Last.fm account
## Polaris 0.7.1
### Server
- Added support for prefix_url option in configuration files
- Improved performance of thumbnail creation
## Polaris 0.7.0
### Server
- Added support for the Partial-Content HTTP header when serving music, this fixes several streaming/seeking issues when using the web client (especially in Chrome)
- New API endpoints for playlist management
- New command line argument (-p) to run on a custom port (contribution from @jxs)
- New command line argument (-f) to run in foreground on Linux (contribution from @jxs)
- Fixed a bug where tracks were queued out of order
- Updated program icon on Windows
Web client
- Added support for playlists
- Added a button to queue the current directory (thanks @jxs)
## Polaris 0.6.0
### Server
- Internal improvements to database management (now using Diesel)
- Configuration settings are now stored in the database, polaris.toml config files are no longer loaded by default
- Added API endpoints to read and write configuration
- User passwords are now encrypted in storage
- Fixed a bug where results of api/browse were not sorted correctly
Web client
- Settings can now be edited from the web UI
- Collection re-index can now be triggered from the web UI
- Added initial setup configuration flow to help set up first user and mount point
- Visual changes
## Polaris 0.5.1
This is a minor release, pushing quite a bit of internal cleanup in the wild.
Server
- Removed OpenSSL dependency on Windows
- No longer send a HTTP cookie after authentication
## Polaris 0.5.0
This release adds Linux support and a variety of improvements to the web client.
### Server
- Added Linux support
- Moved location of configuration file on Windows to `%appdata%\Permafrost\Polaris\polaris.toml`
### Web client
- Performance improvements from upgrading RiotJS to 3.4.4 (from 2.6.2)
- Added support for browsing random and recently added albums
- Minor visual changes (colors, whitespace, etc.)
- Updated favicon
- Fixed a bug where songs containing special characters in their title would not play
- Persist playlist and player state across sessions
## Polaris 0.4.0
This release adds new features supporting the development of polaris-android.
### Server
- Added API endpoint to pull recently added albums
- Added support for the Authorization HTTP header (in addition to the existing /auth API endpoint)
## Polaris 0.3.0
This release is an intermediate release addressing issues with the installation process and updating internals.
### General
- Fixed missing OpenSSL DLL in Windows installer (fixes Issue #3)
- Split every file into an individual installer component
### Server
- Added API endpoint to pull random albums
- Upgraded dependencies
- Added unit tests to indexing and metadata decoding
### Web client
- Web interface playlist now displays more tracks (enough to fill a 4k monitor at normal font size)
## Polaris 0.2.0
This release is focused on polish and performance, solidifying the basics that were put together in version 0.1.0. Here are the major changes:
### General
- Polaris now has a project logo
- Windows installer now supports upgrading an existing install (from 0.2.0 to higher versions)
- Added support for multi-disc albums
### Server
- Major performance improvements to /browse and /flatten API requests (up to 1000x faster for large requests)
- Added API endpoint for version number
- Album covers are now served as thumbnails rather than at source size
- Moved configuration file outside of /Program Files
- Added support for Ogg Vorbis, FLAC and APE metadata
- Fixed a bug where most albums didn't show an artist name
- Fixed a bug where uppercase extensions were not recognized
- Upgraded compiler to Rust 1.13
### Web client
- Complete visual overhaul of the Polaris web client
- Performance improvements for handling large playlist in Polaris web client
- Added error messages when playing songs in unsupported formats
## Polaris 0.1.0
This is the very first Polaris release, celebrating the minimum viable product!
Features in this release:
- Server application with Windows Installer
- Support for multiple users
- Support for serving custom music directories
- Support for custom album art pattern matching
- Support for broadcasting IP to YDNS
- Web UI to browse collection, manage playlist and listen to music

3869
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,50 +1,104 @@
[package]
name = "polaris"
version = "0.7.0"
version = "0.0.0"
authors = ["Antoine Gersant <antoine.gersant@lesforges.org>"]
edition = "2021"
build = "build.rs"
[features]
ui = []
ui = ["native-windows-gui", "native-windows-derive"]
[profile.release]
lto = "thin"
[dependencies]
ape = "0.1.2"
app_dirs = "1.1.1"
diesel = { version = "0.16.0", features = ["sqlite"] }
diesel_codegen = { version = "0.16.0", features = ["sqlite"] }
error-chain = "0.11.0"
getopts = "0.2.15"
hyper = "0.11.2"
id3 = "0.2.0"
image = "0.15.0"
iron = "0.5.1"
lewton = "0.6.2"
metaflac = "0.1.7"
mount = "0.3.0"
params = { git = "https://github.com/euclio/params", branch="update" }
rand = "0.3.15"
regex = "0.2.2"
ring = "0.9.7"
reqwest = "0.6.2"
router = "0.5.1"
secure-session = "0.2.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
staticfile = "0.4.0"
toml = "0.4.5"
typemap = "0.3"
url = "1.2.0"
ape = "0.6"
axum-extra = { version = "0.10.0", features = ["typed-header"] }
axum-range = { version = "0.5.0" }
bitcode = { version = "0.6.3", features = ["serde"] }
branca = "0.10.1"
chumsky = "0.9.3"
enum-map = { version = "2.7.3", features = ["serde"] }
getopts = "0.2.21"
headers = "0.4"
http = "1.1.0"
icu_collator = "1.5.0"
id3 = "1.14.0"
lasso2 = { version = "0.8.2", features = ["serialize"] }
lewton = "0.10.2"
log = "0.4.22"
metaflac = "0.2.7"
mp3-duration = "0.1.10"
mp4ameta = "0.11.0"
native_db = "0.8.1"
native_model = "0.4.20"
nohash-hasher = "0.2.0"
notify = { version = "6.1.1", default-features = false }
notify-debouncer-full = { version = "0.3.1", default-features = false }
num_cpus = "1.14.0"
# TODO upstream PR: https://github.com/yboettcher/opus_headers/pull/7
opus_headers = { git = "https://github.com/agersant/opus_headers", branch = "multivalue" }
pbkdf2 = "0.11"
rand = "0.8"
rayon = "1.10.0"
regex = "1.10.5"
rusqlite = { version = "0.32.0", features = ["bundled"] }
serde = { version = "1.0.147", features = ["derive"] }
serde_derive = "1.0.147"
serde_json = "1.0.122"
simplelog = "0.12.2"
symphonia = { version = "0.5.4", features = [
"all-codecs",
"all-formats",
"opt-simd",
] }
tinyvec = { version = "1.8.0", features = ["serde"] }
thiserror = "1.0.62"
tokio = { version = "1.39", features = ["macros", "rt-multi-thread"] }
tokio-util = { version = "0.7.11", features = ["io"] }
toml = "0.8.19"
tower = { version = "0.5.2" }
tower-http = { version = "0.6.2", features = [
"compression-gzip",
"fs",
"normalize-path",
] }
trie-rs = { version = "0.4.2", features = ["serde"] }
unicase = "2.7.0"
ureq = { version = "2.10.0", default-features = false, features = ["tls"] }
utoipa = { version = "5.3", features = ["axum_extras"] }
utoipa-axum = { version = "0.1" }
utoipa-scalar = { version = "0.2", features = ["axum"] }
[dependencies.rusqlite]
version = "0.12.0"
features = ["bundled"]
[dependencies.axum]
version = "0.8.1"
default-features = false
features = ["http1", "json", "tokio", "tower-log", "query"]
[dependencies.image]
version = "0.25.2"
default-features = false
features = ["bmp", "gif", "jpeg", "png"]
[target.'cfg(windows)'.dependencies]
winapi = { git = "https://github.com/retep998/winapi-rs", branch="0.2" }
kernel32-sys = { git = "https://github.com/retep998/winapi-rs", branch="0.2" }
shell32-sys = { git = "https://github.com/retep998/winapi-rs", branch="0.2" }
user32-sys = { git = "https://github.com/retep998/winapi-rs", branch="0.2" }
uuid = "0.5.0"
native-windows-gui = { version = "1.0.13", default-features = false, features = [
"cursor",
"image-decoder",
"message-window",
"menu",
"tray-notification",
], optional = true }
native-windows-derive = { version = "1.0.5", optional = true }
[target.'cfg(unix)'.dependencies]
unix-daemonize = "0.1.2"
daemonize = "0.5"
sd-notify = "0.4.2"
[target.'cfg(windows)'.build-dependencies]
embed-resource = "2.4.2"
winres = "0.1"
[dev-dependencies]
axum-test = "17.0"
bytes = "1.7.1"
percent-encoding = "2.2"

123
README.md
View file

@ -1,92 +1,71 @@
[![Linux Build Status](https://travis-ci.org/agersant/polaris.svg?branch=master)](https://travis-ci.org/agersant/polaris)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/w0gsnq7mo4bu0wne/branch/master?svg=true)](https://ci.appveyor.com/project/agersant/polaris)
<div align="center">
<h1><img src="res/readme/logo.png?raw=true"/></h1>
<img src="res/readme/logo.png?raw=true"/>
Polaris is a music streaming application, designed to let you enjoy your music collection from any computer or mobile device. Polaris works by streaming your music directly from your own computer, without uploading it to a third-party. It is free and open-source software, without any kind of premium version. The only requirement is that your computer stays on while it streams music!
[![Actions Status](https://github.com/agersant/polaris/workflows/Build/badge.svg)](https://github.com/agersant/polaris/actions)
[![codecov](https://codecov.io/github/agersant/polaris/graph/badge.svg?token=EQqCmBEf2T)](https://codecov.io/github/agersant/polaris)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE-MIT)
# Getting Started
![Polaris Web UI](res/readme/web_ui.png?raw=true "Polaris Web UI")
</div>
## Requirements
# About
One of the following:
- Windows 7 or newer
- Linux (any reasonably modern distribution should do)
Polaris is a self-hosted music streaming server, to enjoy your music collection from any computer or mobile device. It is free and open-source software, without any kind of premium version.
## Installation
The goals of this project are:
- 🔥 Exceptional performance and responsiveness
- 📚️ First-class support for large music collections (100,000+ songs)
- 📦️ Ease of installation, deployment and maintenance
- ✨ Beautiful user interface
### Windows
1. Download the [latest installer](https://github.com/agersant/polaris/releases/latest) (you want the .msi file)
2. Run the installer
3. That's it, you're done!
# Try It Out!
You can now start Polaris from the start menu or from your desktop, Polaris will also start automatically next time you restart your computer. You can tell when Polaris is running by its icon in the notification area (near the clock and volume controls).
Check out the demo over at https://demo.polaris.stream, featuring a selection of Creative Commons Music. The credentials to access this server are:
### Linux
Username: `demo_user`
Password: `demo_password`
#### Dependencies
# Features
1. Install OpenSSL and its headers. This is most likely available from your distribution's package manager. For instance on Ubuntu, execute `sudo apt-get install libssl-dev`
2. Install the Rust compiler by executing `curl https://sh.rustup.rs -sSf | sh` or using an [alternative method](https://www.rust-lang.org/en-US/install.html)
- 🖥️ Runs on Windows, Linux, BSD, or through Docker
- 🔊 Support for `flac`, `mp3`, `mp4`, `mpc`, `ogg`, `opus`, `ape`, `wav` and `aiff` files
- 🌈 Dark mode variants and customizable color palette
- 💿️ Browse your music by album, artist or genre
- 📂 Browse your music as a file tree
- 🌊 Song audio-waveform visualization
- 🏷️ Support for multi-value fields in song metadata (eg. multiple artists per song)
- 🔍️ Powerful search functionality with per-field queries
- ⚙️ Plain-text configuration also editable with built-in UI
- 👥 Setup multiple users, each with their own playlists
- 📱 Listen to your music on the go:
- Polaris Android ([Google Play Store](https://play.google.com/store/apps/details?id=agersant.polaris) · [F-Droid](https://f-droid.org/packages/agersant.polaris/) · [Repository](https://github.com/agersant/polaris-android))
- Polarios ([App Store](https://apps.apple.com/app/polarios/id1662366309) · [Repository](https://gitlab.com/elise/Polarios)) [third-party]
#### Polaris installation
1. Download the [latest release]((https://github.com/agersant/polaris/releases/latest)) of Polaris (you want the .tar.gz file)
2. Extract the polaris archive in a directory and open a terminal in that directory
3. Execute `make install` (this may take several minutes)
# Installation
This installation process puts the polaris executable in `~/.local/bin/polaris` and several data files under `~/.local/share/polaris`.
[Installation documentation](docs/SETUP.md)
From here, you might want to adjust your system to run Polaris on login using Cron, Systemd or whichever method your distribution endorses.
[Streaming from remote devices](docs/DDNS.md)
If you want to uninstall Polaris, execute `make uninstall` from the extracted archive's directory. This will simply delete the directories created by the install process.
[![Packaging status](https://repology.org/badge/vertical-allrepos/polaris-streaming.svg?columns=3)](https://repology.org/project/polaris-streaming/versions)
### Test Run
# Documentation
- Start Polaris using the shortcut on your desktop (Windows) or by running the executable in `~/.local/bin/polaris` (Linux)
- In your Web browser, access http://localhost:5050
- You will see a welcome page that will guide you through the Polaris configuration
- 📒 [Changelog](CHANGELOG.md)
- 🔧 [Configuration](docs/CONFIGURATION.md)
- 👷 [Contribute to Polaris](docs/CONTRIBUTING.md)
- 🛟 [Maintenance Runbooks](docs/MAINTENANCE.md)
![Polaris Web UI](res/readme/web_ui.png?raw=true "Polaris Web UI")
The Polaris server API is documented via [OpenAPI](https://demo.polaris.stream/api-docs/). Every installation of Polaris distributes this interactive documentation. To access it, open http://localhost:5050/api-docs/ in your browser on the machine running Polaris.
### Streaming From Other Devices
# Credits & License Information
If you're only interested in streaming on your local network, you can skip this section. If you want to stream from school, from work, or on the go, this is for you.
Music featured in the demo installation:
#### Dynamic DNS
You can access your Polaris installation from anywhere via your computer's public IP address, but there are two problems with that:
- IP addresses are difficult to remember
- Most ISPs don't give you a fixed IP address
A solution to these problems is to set up Dynamic DNS, so that your installation can always be reached at a fixed URL.
The steps below will walk you through setting up YDNS and Polaris to give your installation a fixed URL. If you have another solution in mind, or prefer using another Dynamic DNS service, skip to the next section.
1. Register for a free account on https://ydns.io
2. On the YDNS website, access the "My Hosts" page and press the + sign for "Add Host"
3. Fill the host form as described below:
- Domain: ydns.eu
- Name: This part is up to you, whatever you enter will be in the URL you use to access Polaris
- Content: Leave the default. Take a note whether the value looks like an IPv4 address (format: xxx.xxx.xxx.xxx) or an IPv6 address (format: xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx)
- Type: Dynamic IP
4. If the content field looked like an IPv4 address: skip to step #6
5. If the content field looked like an IPv6 address:
- Click on your host name (eg. yourdomain.ydns.eu)
- You should now see a page which looks like this:
![YDNS Records](res/readme/ydns_records.png?raw=true "YDNS Records")
- Click on the green "+" icon on the right
- Fill out the new form as described:
- Make sure the `Type` field is set to `A`
- Set content to 0.0.0.0
- You should now be back on the "records" page which was pictured above
- Click on the ID number on the left (#28717 in the example above) of the column that has AAAA listed as its "Type".
- Click on the red trash can icon in the corner to delete this record
- Done!
6. In the Polaris web interface, access the `Dynamic DNS` tab of the settings screen:
- Update the hostname field to match what you set in step 5. (eg. http://yourdomain.ydns.eu)
- Update the username field to the email address you use when creating your YDNS account
- Update the password field with your YDNS API password. You can find this password on https://ydns.io: click on the "User" icon in the top right and then `Preferences > API`.
#### Port Forwarding
Configure port forwarding on your router to redirect port 80 towards port 5050 on the computer where you run Polaris. The exact way to do this depends on your router manufacturer and model.
Don't forget to restart Polaris to apply your configuration changes, and access your music from other computers at http://yourdomain.ydns.eu
- [Chris Zabriskie - Abandon Babylon](https://chriszabriskie.bandcamp.com/album/abandon-babylon) [(License)](https://creativecommons.org/licenses/by/3.0/)
- [Chris Zabriskie - Angie's Sunday Service](https://chriszabriskie.bandcamp.com/album/angies-sunday-service) [(License)](https://creativecommons.org/licenses/by/3.0/)
- [glaciære - pool water blue](https://steviasphere.bandcamp.com/album/pool-water-blue) [(License)](https://creativecommons.org/licenses/by/3.0/)
- [glaciære - light ripples](https://steviasphere.bandcamp.com/album/light-ripples) [(License)](https://creativecommons.org/licenses/by/3.0/)
- [Koresma South](https://koresma.bandcamp.com/album/south) [(License)](https://creativecommons.org/licenses/by-nc-sa/3.0/)
- [Pete Murphy - Essence EP](https://petemurphy.bandcamp.com/album/falling-down-the-fred-astaires-solo-jazz-piano) [(License)](https://creativecommons.org/licenses/by-nc-sa/3.0/)
- [Rameses B - Essence EP](https://ramesesb.bandcamp.com/album/essence-ep) [(License)](https://creativecommons.org/licenses/by-nc-nd/3.0/)

View file

@ -1,87 +0,0 @@
# Appveyor configuration template for Rust using rustup for Rust installation
# https://github.com/starkat99/appveyor-rust
## Operating System (VM environment) ##
# Rust needs at least Visual Studio 2013 Appveyor OS for MSVC targets.
os: Visual Studio 2015
## Build Matrix ##
# This configuration will set up a build for each channel & target combination (12 windows
# combinations in all).
#
# There are 3 channels: stable, beta, and nightly.
#
# Alternatively, the full version may be specified for the channel to build using that specific
# version (e.g. channel: 1.5.0)
#
# The values for target are the set of windows Rust build targets. Each value is of the form
#
# ARCH-pc-windows-TOOLCHAIN
#
# Where ARCH is the target architecture, either x86_64 or i686, and TOOLCHAIN is the linker
# toolchain to use, either msvc or gnu. See https://www.rust-lang.org/downloads.html#win-foot for
# a description of the toolchain differences.
# See https://github.com/rust-lang-nursery/rustup.rs/#toolchain-specification for description of
# toolchains and host triples.
#
# Comment out channel/target combos you do not wish to build in CI.
#
# You may use the `cargoflags` and `RUSTFLAGS` variables to set additional flags for cargo commands
# and rustc, respectively. For instance, you can uncomment the cargoflags lines in the nightly
# channels to enable unstable features when building for nightly. Or you could add additional
# matrix entries to test different combinations of features.
environment:
matrix:
### MSVC Toolchains ###
# Stable 64-bit MSVC
- channel: stable
target: x86_64-pc-windows-msvc
# Beta 64-bit MSVC
- channel: beta
target: x86_64-pc-windows-msvc
# Nightly 64-bit MSVC
- channel: nightly
target: x86_64-pc-windows-msvc
#cargoflags: --features "unstable"
### Allowed failures ###
# See Appveyor documentation for specific details. In short, place any channel or targets you wish
# to allow build failures on (usually nightly at least is a wise choice). This will prevent a build
# or test failure in the matching channels/targets from failing the entire build.
matrix:
allow_failures:
- channel: nightly
- channel: beta
## Install Script ##
# This is the most important part of the Appveyor configuration. This installs the version of Rust
# specified by the 'channel' and 'target' environment variables from the build matrix. This uses
# rustup to install Rust.
#
# For simple configurations, instead of using the build matrix, you can simply set the
# default-toolchain and default-host manually here.
install:
#Rust install
- appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
- rustup-init -yv --default-toolchain %channel% --default-host %target%
- set PATH=%PATH%;%USERPROFILE%\.cargo\bin
- rustc -vV
- cargo -vV
## Build Script ##
# 'cargo test' takes care of building for us, so disable Appveyor's build stage. This prevents
# the "directory does not contain a project or solution file" error.
build: false
# Uses 'cargo test' to run tests and build. Alternatively, the project may call compiled programs
#directly or perform other testing commands. Rust will automatically be placed in the PATH
# environment variable.
test_script:
- cargo test --features "ui" --verbose %cargoflags%

13
build.rs Normal file
View file

@ -0,0 +1,13 @@
//! Build script. On Windows it embeds the application icon and the manifest
//! resource into the produced executable; on Unix it is a no-op.

#[cfg(windows)]
fn main() {
    // Attach the application icon to the executable via winres.
    let mut resources = winres::WindowsResource::new();
    resources.set_icon("./res/windows/application/icon_polaris_512.ico");
    resources.compile().unwrap();

    // Compile and embed the manifest resource script as well.
    embed_resource::compile(
        "res/windows/application/polaris-manifest.rc",
        embed_resource::NONE,
    );
}

#[cfg(unix)]
fn main() {}

View file

@ -1,13 +0,0 @@
#!/bin/sh
# Packages the Polaris sources into a versioned tarball under ./release.
# Must be run from the repository root; expects Cargo.toml to contain a
# `version = "..."` line.
echo "Creating output directory"
mkdir -p release/tmp
echo "Copying package files"
cp -r web src Cargo.toml Cargo.lock res/unix/Makefile release/tmp
echo "Creating tarball"
# Pull the crate version out of the first `version` line of Cargo.toml,
# stripping quotes and line endings so it is safe to embed in a file name.
POLARIS_VERSION=$(grep -m 1 ^version Cargo.toml | awk '{print $3}' | tr -d '"\r\n')
tar -zc -C release/tmp -f release/polaris-$POLARIS_VERSION.tar.gz .
echo "Cleaning up"
rm -rf release/tmp

View file

@ -1,44 +0,0 @@
# Windows release script: compiles Polaris (with the "ui" feature) and builds
# the MSI installer. Requires the Windows SDK (RC) and the WiX toolset
# (heat/candle/light) to be available on PATH.

# Read the crate version out of Cargo.toml so the installer file name matches.
Get-ChildItem "Cargo.toml" | % {
$conf = $_ | Get-Content -raw
$conf -match 'version\s+=\s+"(.*)"' | out-null
$POLARIS_VERSION = $matches[1]
}
"Compiling resource file"
# Compile the Win32 resources (icons, manifest) that get linked into the exe below.
RC /fo res\windows\application\application.res res\windows\application\application.rc
""
"Compiling executable"
# /SUBSYSTEM:WINDOWS suppresses the console window; mainCRTStartup keeps the CRT entry point.
cargo rustc --release --features "ui" -- -C link-args="/SUBSYSTEM:WINDOWS /ENTRY:mainCRTStartup res\windows\application\application.res"
""
"Creating output directory"
New-Item .\release\tmp -type directory -Force | Out-Null
Remove-Item -Recurse .\release\tmp\*
""
"Copying to output directory"
Copy-Item .\res\windows\installer\license.rtf .\release\tmp\
Copy-Item .\res\windows\installer\banner.bmp .\release\tmp\
Copy-Item .\res\windows\installer\dialog.bmp .\release\tmp\
Copy-Item .\target\release\polaris.exe .\release\tmp\
Copy-Item .\web\img .\release\tmp\web\img -recurse
Copy-Item .\web\js .\release\tmp\web\js -recurse
Copy-Item .\web\lib .\release\tmp\web\lib -recurse
Copy-Item .\web\style .\release\tmp\web\style -recurse
Copy-Item .\web\tags .\release\tmp\web\tags -recurse
Copy-Item .\web\favicon.png .\release\tmp\web\
Copy-Item .\web\index.html .\release\tmp\web\
""
"Creating installer"
# heat harvests the web UI directory into a WiX fragment; candle compiles the
# .wxs sources; light links everything into the final MSI (with some warnings
# and ICE checks suppressed).
heat dir .\release\tmp\web\ -ag -g1 -dr AppDataPolaris -cg WebUI -sfrag -var wix.WebUIDir -out .\release\tmp\web_ui_fragment.wxs
candle -wx -ext WixUtilExtension -arch x64 -out .\release\tmp\web_ui_fragment.wixobj .\release\tmp\web_ui_fragment.wxs
candle -wx -ext WixUtilExtension -arch x64 -out .\release\tmp\installer.wixobj .\res\windows\installer\installer.wxs
light -dWebUIDir=".\release\tmp\web" -wx -ext WixUtilExtension -ext WixUIExtension -spdb -sw1076 -sice:ICE38 -sice:ICE64 -out .\release\Polaris_$POLARIS_VERSION.msi .\release\tmp\installer.wixobj .\release\tmp\web_ui_fragment.wixobj
"Cleaning up"
Remove-Item -Recurse .\release\tmp
""
Read-Host -Prompt "All clear! Press Enter to exit"

50
docs/CONFIGURATION.md Normal file
View file

@ -0,0 +1,50 @@
# Configuration
Polaris configuration resides in a single text file whose format is documented below. You can use the Polaris web UI to modify the configuration, or write to it in any text editor. You may edit the configuration file while Polaris is running.
## Location
The location of the configuration file is always logged during Polaris startup. It is determined as follows:
- From the `--config` (or `-c`) CLI option if present. This option must point to the `.toml` file.
- If the CLI option is not specified, Polaris will look for a `polaris.toml` file, inside the directory specified by the `POLARIS_CONFIG_DIR` environment variable _at compilation time_. When using the Windows installer, this will be `%LOCALAPPDATA%/Permafrost/Polaris/polaris.toml`. When using the supplied Makefile, the default is either `/usr/local/etc/polaris` (for a system-wide installation), or `~/.config/polaris` (for an XDG installation).
- If `POLARIS_CONFIG_DIR` was not set when Polaris was compiled, it will default to `.` on Linux, and the `LOCALAPPDATA` location mentioned above on Windows. This behavior on Windows may change in future releases.
## Format
The configuration file uses the [TOML](https://toml.io/) format. Everything in the configuration file is optional and may be omitted (unless mentioned otherwise).
```toml
# Regular expression used to identify album art in files adjacent to an audio file
album_art_pattern = "Folder.(jpeg|jpg|png)"
# A URL Polaris will regularly make requests to in order to update Dynamic DNS
ddns_url = "https://example.com?token=foobar"
# Array of locations Polaris should scan to find music files
[[mount_dirs]]
# Directory to scan
source = "/home/example/music"
# User-facing name for this directory (must be unique)
name = "My Music 🎧️"
[[mount_dirs]]
source = "/mnt/example/more_music"
name = "Extra Music 🎵"
# Array of user accounts who can connect to the Polaris server
[[users]]
# Username for login
name = "example-user"
# If true, user will have access to all settings in the web UI
admin = true
# Plain text password for this user. Will be ignored if hashed_password is set. Polaris will never write to this field. For each user, at least one of initial_password and hashed_password must be set.
initial_password = "top-secret-password"
# Hashed and salted password for the user. Polaris will create this field if unset.
hashed_password = "$pbkdf2-sha256$i=10000,l=32$SI8LjK1KtvcawhgmWGJgRA$t9btMwhUTQ8r3vqI1xhArn19J7Jezyoi461fFjhZXGU"
[[users]]
name = "other-user"
admin = true
initial_password = "atmospheric-strawberry64"
```

37
docs/CONTRIBUTING.md Normal file
View file

@ -0,0 +1,37 @@
# Contributing
## Guidelines
While Polaris is free and open-source software, it is not very open to code contributions. The reasons behind this are:
- Polaris is a hobby project. I don't want it to feel like my day job, where I do a lot of code reviews, mentoring and tech leadership.
- I am committed to maintaining this software for a very long time. I would rather maintain code that I mostly wrote myself.
This still leaves room for a few avenues to contribute:
- Help answering questions in the issue tracker.
- Package Polaris for a Linux distribution
- Documentation improvements or writing user guides.
- Satellite projects (eg. [docker-polaris](https://github.com/ogarcia/docker-polaris), [polarios](https://gitlab.com/elise/Polarios))
- Bug fixes.
For non-trivial new features, you are welcome to maintain a fork. If you need help finding your way around the code, feel free to open a [discussion thread](https://github.com/agersant/polaris/discussions).
## Compiling and running Polaris
1. [Install Rust](https://www.rust-lang.org/en-US/install.html) (stable toolchain)
2. Clone the polaris depot with this command: `git clone https://github.com/agersant/polaris.git`
3. You can now compile and run Polaris from the newly created directory with the command: `cargo run`
Polaris supports a few command line arguments which are useful during development:
- `-c some/config.toml` sets the location of the [configuration](/docs/CONFIGURATION.md) file.
- `--data some/path` sets the folder Polaris will use to store runtime data such as playlists, collection index and auth secrets.
- `-w some/path/to/web/dir` lets you point to the directory to be served as the web interface. You can find a suitable directory in your Polaris install (under `/web`), or from the [latest polaris-web release](https://github.com/agersant/polaris-web/releases/latest/download/web.zip).
- `-f` (on Linux) makes Polaris not fork into a separate process.
Putting it all together, a typical command to compile and run the program would be: `cargo run -- -w web -c test-config.toml`
While Polaris is running, access the web UI at [http://localhost:5050](http://localhost:5050).
## Running unit tests
That's the easy part, simply run `cargo test`!

21
docs/DDNS.md Normal file
View file

@ -0,0 +1,21 @@
# Streaming from other devices
These instructions apply to users running Polaris on a home network. When deploying to cloud services or VPS, configuration requirements will differ.
## Port forwarding
Configure port forwarding on your router to redirect port 80 traffic to port 5050 on the computer running Polaris. The exact way to do this depends on your router manufacturer and model.
## Dynamic DNS
You can access your Polaris installation from anywhere via your computer's public IP address, but there are two problems with that:
- IP addresses are difficult to remember
- Most ISPs don't give you a fixed IP address
A solution to these problems is to set up Dynamic DNS, so that your installation can always be reached at a fixed URL.
1. Reserve a URL with a dynamic DNS provider such as https://www.duckdns.org/ or https://freemyip.com/.
2. The dynamic DNS provider gives you a unique Update URL that can be used to tell them where to send traffic. For example, `freemyip.com` gives you this URL immediately after claiming a subdomain. Other providers may show it in your profile page, etc.
3. Access your Polaris instance (http://localhost:5050 by default).
4. Go to the `Settings` page and into the `Dynamic DNS` section.
5. Set the Update URL to the one you obtained in step 2.

10
docs/MAINTENANCE.md Normal file
View file

@ -0,0 +1,10 @@
# Maintenance
## How to make a release
- Update CHANGELOG.md to reflect new release
- On Github, go to **Actions**, select the **Make Release** workflow and click **Run workflow**
- Select the branch to deploy (usually `master`)
- Input a user-facing version name (eg: **0.13.0**)
- Click the **Run workflow** button
- After CI completes, move the release from Draft to Published

30
docs/SETUP.md Normal file
View file

@ -0,0 +1,30 @@
# Installation
## On Windows
1. Download the [latest installer](https://github.com/agersant/polaris/releases/latest) (you want the .msi file)
2. Run the installer
3. Launch Polaris from the start menu
4. In your web browser, access http://localhost:5050
## In a docker container
To run polaris from a Docker container, please follow instructions from the [docker-polaris](https://github.com/ogarcia/docker-polaris) repository.
## From source on Linux
### Dependencies
1. Install OpenSSL, SQLite and their respective headers (eg. `sudo apt-get install libsqlite3-dev libssl-dev`).
2. Install `binutils` and `pkg-config` (eg. `sudo apt-get install binutils pkg-config`).
3. Install the Rust compiler by executing `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh` or using an [alternative method](https://www.rust-lang.org/en-US/install.html)
### Polaris installation
1. Download the [latest release](https://github.com/agersant/polaris/releases/latest) of Polaris (you want the .tar.gz file)
2. Extract the Polaris archive in a directory and open a terminal in that directory
3. To install Polaris within your home directory, execute `make install-xdg`. This installation follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). You can use `make preview-xdg` to see which directories the install process would use.
4. If you prefer a system-wide install, execute `make install` (without the `-xdg` suffix). If you use `sudo` to perform such a system install, you may need the `-E` option so that your sudo user finds the Rust binaries: `sudo -E make install`. This installation follows the [GNU Standard Installation Directories](https://www.gnu.org/prep/standards/html_node/Directory-Variables.html). You can use `make preview` to see which directories the install process would use.
From here, you might want to adjust your system to run Polaris on login using Systemd, Cron or whichever method your distribution endorses.
If you want to uninstall Polaris, execute `make uninstall-xdg` from the extracted archive's directory (or `make uninstall` if you made a system-wide install). This will delete all the files and directories listed above (including your configuration, playlists, etc.). If you customized the install process by specifying environment variables like `PREFIX`, make sure they are set to the same values when running the uninstall command.

BIN
docs/res/logo_no_text.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 17 KiB

View file

Before

Width:  |  Height:  |  Size: 16 KiB

After

Width:  |  Height:  |  Size: 16 KiB

46
flake.lock generated Normal file
View file

@ -0,0 +1,46 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1736701207,
"narHash": "sha256-jG/+MvjVY7SlTakzZ2fJ5dC3V1PrKKrUEOEE30jrOKA=",
"rev": "ed4a395ea001367c1f13d34b1e01aa10290f67d6",
"revCount": 737298,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.737298%2Brev-ed4a395ea001367c1f13d34b1e01aa10290f67d6/01945f5f-4175-7e72-8809-a1e482c4a443/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/NixOS/nixpkgs/0.1.%2A.tar.gz"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1736735482,
"narHash": "sha256-QOA4jCDyyUM9Y2Vba+HSZ/5LdtCMGaTE/7NkkUzBr50=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "cf960a1938ee91200fe0d2f7b2582fde2429d562",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

58
flake.nix Normal file
View file

@ -0,0 +1,58 @@
{
# Dev-shell flake for Polaris: provides a Rust toolchain (honoring the repo's
# rust-toolchain file when present) plus the native libraries and cargo tools
# used during development.
description = "A Nix-flake-based Rust development environment";
inputs = {
nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1.*.tar.gz";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, rust-overlay }:
let
supportedSystems = [ "x86_64-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ];
# Instantiate nixpkgs (with our overlays applied) once per supported system
# and map `f` over the results.
forEachSupportedSystem = f: nixpkgs.lib.genAttrs supportedSystems (system: f {
pkgs = import nixpkgs {
inherit system;
overlays = [ rust-overlay.overlays.default self.overlays.default ];
};
});
in
{
overlays.default = final: prev: {
# Resolve the toolchain pinned by rust-toolchain.toml / rust-toolchain if
# either file exists; otherwise fall back to the latest stable toolchain
# with the rust-src and rustfmt components.
rustToolchain =
let
rust = prev.rust-bin;
in
if builtins.pathExists ./rust-toolchain.toml then
rust.fromRustupToolchainFile ./rust-toolchain.toml
else if builtins.pathExists ./rust-toolchain then
rust.fromRustupToolchainFile ./rust-toolchain
else
rust.stable.latest.default.override {
extensions = [ "rust-src" "rustfmt" ];
};
};
devShells = forEachSupportedSystem ({ pkgs }: {
default = pkgs.mkShell {
packages = with pkgs; [
rustToolchain
openssl
pkg-config
cargo-deny
cargo-edit
cargo-watch
rust-analyzer
samply
];
env = {
# Required by rust-analyzer
RUST_SRC_PATH = "${pkgs.rustToolchain}/lib/rustlib/src/rust/library";
};
};
});
};
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 MiB

Binary file not shown.

BIN
res/readme/dark_mode.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 723 KiB

BIN
res/readme/logo.afdesign Normal file

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 48 KiB

After

Width:  |  Height:  |  Size: 47 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 276 KiB

After

Width:  |  Height:  |  Size: 722 KiB

View file

@ -1,21 +1,101 @@
POLARIS_BIN_DIR := ~/.local/bin/polaris
POLARIS_DATA_DIR := ~/.local/share/polaris
.PHONY: all build build-system build-xdg cargo-build clean preview preview-system preview-xdg list-paths install install-bin install-data install-system install-xdg uninstall uninstall-bin uninstall-data uninstall-system uninstall-xdg
all: build
UID := $(shell id -u)
build:
PREFIX ?= /usr/local
EXEC_PREFIX ?= $(PREFIX)
BINDIR ?= $(EXEC_PREFIX)/bin
DATAROOTDIR ?= $(PREFIX)/share
DATADIR ?= $(DATAROOTDIR)
SYSCONFDIR ?= $(PREFIX)/etc
LOCALSTATEDIR ?= $(PREFIX)/var
RUNSTATEDIR ?= $(LOCALSTATEDIR)/run
%-system: POLARIS_BIN_PATH := $(BINDIR)/polaris
%-system: export POLARIS_WEB_DIR := $(DATADIR)/polaris/web
%-system: export POLARIS_CONFIG_DIR := $(SYSCONFDIR)/polaris
%-system: export POLARIS_DATA_DIR := $(LOCALSTATEDIR)/lib/polaris
%-system: export POLARIS_DB_DIR := $(LOCALSTATEDIR)/lib/polaris
%-system: export POLARIS_LOG_DIR := $(LOCALSTATEDIR)/log/polaris
%-system: export POLARIS_CACHE_DIR := $(LOCALSTATEDIR)/cache/polaris
%-system: export POLARIS_PID_DIR := $(RUNSTATEDIR)/polaris
XDG_CACHE_HOME ?= $(HOME)/.cache
XDG_CONFIG_HOME ?= $(HOME)/.config
XDG_DATA_HOME ?= $(HOME)/.local/share
XDG_BINDIR ?= $(HOME)/.local/bin
XDG_DATADIR ?= $(XDG_DATA_HOME)/polaris
XDG_CACHEDIR ?= $(XDG_CACHE_HOME)/polaris
XDG_CONFIGDIR ?= $(XDG_CONFIG_HOME)/polaris
ifdef $(XDG_RUNTIME_DIR)
XDG_PIDDIR ?= $(XDG_RUNTIME_DIR)/polaris
else
XDG_PIDDIR ?= /tmp/polaris-$(UID)
endif
%-xdg: POLARIS_BIN_PATH := $(XDG_BINDIR)/polaris
%-xdg: export POLARIS_WEB_DIR := $(XDG_DATADIR)/web
%-xdg: export POLARIS_CONFIG_DIR := $(XDG_CONFIGDIR)
%-xdg: export POLARIS_DATA_DIR := $(XDG_DATADIR)
%-xdg: export POLARIS_DB_DIR := $(XDG_DATADIR)
%-xdg: export POLARIS_LOG_DIR := $(XDG_CACHEDIR)
%-xdg: export POLARIS_CACHE_DIR := $(XDG_CACHEDIR)
%-xdg: export POLARIS_PID_DIR := $(XDG_PIDDIR)
# Build
build-system: cargo-build
build-xdg: cargo-build
build: build-system
all: build-system
cargo-build:
cargo build --release
install: build
install -d $(POLARIS_BIN_DIR)
install -d $(POLARIS_DATA_DIR)
install ./target/release/polaris $(POLARIS_BIN_DIR)
cp -r ./web $(POLARIS_DATA_DIR)
@echo "Polaris installation complete!"
clean:
cargo clean
uninstall:
rm -r $(POLARIS_BIN_DIR)
rm -r $(POLARIS_DATA_DIR)
# Preview
preview-system: list-paths
preview-xdg: list-paths
preview: preview-system
list-paths:
$(info POLARIS_BIN_PATH is $(POLARIS_BIN_PATH))
$(info POLARIS_WEB_DIR is $(POLARIS_WEB_DIR))
$(info POLARIS_CONFIG_DIR is $(POLARIS_CONFIG_DIR))
$(info POLARIS_DATA_DIR is $(POLARIS_DATA_DIR))
$(info POLARIS_DB_DIR is $(POLARIS_DB_DIR))
$(info POLARIS_LOG_DIR is $(POLARIS_LOG_DIR))
$(info POLARIS_CACHE_DIR is $(POLARIS_CACHE_DIR))
$(info POLARIS_PID_DIR is $(POLARIS_PID_DIR))
# Install
install-system: install-bin install-data
install-xdg: install-bin install-data
install: install-system
install-bin: cargo-build
install -Dm755 ./target/release/polaris $(POLARIS_BIN_PATH)
install-data:
install -d $(POLARIS_WEB_DIR)
cp -rT ./web $(POLARIS_WEB_DIR)
# Uninstall
uninstall-system: uninstall-bin uninstall-data
uninstall-xdg: uninstall-bin uninstall-data
uninstall: uninstall-system
uninstall-bin:
rm $(POLARIS_BIN_PATH)
uninstall-data:
rm -rf $(POLARIS_WEB_DIR)
rm -rf $(POLARIS_CONFIG_DIR)
rm -rf $(POLARIS_DATA_DIR)
rm -rf $(POLARIS_DB_DIR)
rm -rf $(POLARIS_LOG_DIR)
rm -rf $(POLARIS_CACHE_DIR)
rm -rf $(POLARIS_PID_DIR)

12
res/unix/release_script.sh Executable file
View file

@ -0,0 +1,12 @@
#!/bin/sh
# Packages the Polaris sources and build support files into
# release/polaris.tar.gz. Must be run from the repository root.
echo "Creating output directory"
mkdir -p release/tmp/polaris
echo "Copying package files"
cp -r web src test-data build.rs Cargo.toml Cargo.lock rust-toolchain.toml res/unix/Makefile release/tmp/polaris
echo "Creating tarball"
# -C release/tmp so the archive contains a single top-level `polaris` directory.
tar -zc -C release/tmp -f release/polaris.tar.gz polaris
echo "Cleaning up"
rm -rf release/tmp

View file

@ -1,15 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false" />
</requestedPrivileges>
</security>
</trustInfo>
<asmv3:application>
<asmv3:windowsSettings xmlns="http://schemas.microsoft.com/SMI/2005/WindowsSettings">
<dpiAware>true</dpiAware>
</asmv3:windowsSettings>
</asmv3:application>
</assembly>

View file

@ -1,7 +0,0 @@
#define IDI_POLARIS 0x101
#define IDI_POLARIS_TRAY 0x102
CREATEPROCESS_MANIFEST_RESOURCE_ID RT_MANIFEST "application.manifest"
IDI_POLARIS ICON "icon_polaris_512.ico"
IDI_POLARIS_TRAY ICON "icon_polaris_outline_64.ico"

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 31 KiB

View file

@ -0,0 +1,2 @@
#define RT_MANIFEST 24
1 RT_MANIFEST "polaris.exe.manifest"

View file

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="1.0.0.0"
processorArchitecture="*"
name="app"
type="win32"
/>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="*"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>

View file

@ -1,67 +1,61 @@
<?xml version='1.0' encoding='windows-1252'?>
<Wix xmlns='http://schemas.microsoft.com/wix/2006/wi' xmlns:util="http://schemas.microsoft.com/wix/UtilExtension">
<Product Name='Polaris' Id='CFFE1C21-E19E-45AB-A314-058CAC23779A' UpgradeCode='FF16B075-1D36-47F4-BE37-D95BBC1A412C' Language='1033' Codepage='1252' Version='0.7.0' Manufacturer='Permafrost'>
<Package Id='*' Keywords='Installer' Platform='x64' InstallScope='perUser' Description='Polaris Installer' Manufacturer='Permafrost' Languages='1033' Compressed='yes' SummaryCodepage='1252' />
<MajorUpgrade DowngradeErrorMessage='A newer version of Polaris is already installed.' Schedule='afterInstallExecute' />
<Media Id='1' Cabinet='Sample.cab' EmbedCab='yes' DiskPrompt='Installation Media #1' />
<Property Id='DiskPrompt' Value='Polaris Installation [1]' />
<Directory Id='TARGETDIR' Name='SourceDir'>
<Directory Id='AppDataFolder'>
<Directory Id='AppDataPermafrost' Name='Permafrost'>
<Directory Id='AppDataPolaris' Name='Polaris' FileSource='.'>
<Component Id='MainExecutable' Guid='*'>
<File Source='polaris.exe' KeyPath='yes' Checksum='yes'>
<Shortcut Id='StartupMenuPolaris' Directory='StartupFolder' Name='Polaris' WorkingDirectory='AppDataPolaris' Icon='polaris.exe' IconIndex='0' Advertise='yes' />
<Shortcut Id='StartMenuPolaris' Directory='ProgramMenuDir' Name='Polaris' WorkingDirectory='AppDataPolaris' Icon='polaris.exe' IconIndex='0' Advertise='yes' />
<Shortcut Id='DesktopPolaris' Directory='DesktopFolder' Name='Polaris' WorkingDirectory='AppDataPolaris' Icon='polaris.exe' IconIndex='0' Advertise='yes' />
</File>
</Component>
</Directory>
</Directory>
</Directory>
<Directory Id='DesktopFolder' Name='Desktop' />
<Directory Id='StartupFolder' Name='Startup' />
<Directory Id='ProgramMenuFolder' Name='Programs'>
<Directory Id='ProgramMenuDir' Name='Permafrost'>
<Component Id="ProgramMenuDir" Guid='*'>
<RemoveFolder Id='ProgramMenuDir' On='uninstall' />
<RegistryValue Root='HKCU' Key='Software\Permafrost\Polaris' Name='ProgramMenuEntry' Type='string' Value='' KeyPath='yes' />
</Component>
</Directory>
</Directory>
</Directory>
<!--Remove extra files after uninstall (db, thumbnails, etc.)-->
<Property Id="EXTRADATAPATH">
<RegistrySearch Root="HKCU" Key="Software\Permafrost\Polaris" Name="CleanupExtraData" Type="raw" Id="ExtraDataPathSearch" />
</Property>
<DirectoryRef Id="AppDataPermafrost">
<Component Id="CleanupExtraData" Guid="DF415F12-A1B4-48EE-98BC-E0B75AF556AD">
<RegistryValue Root="HKCU" Key="Software\Permafrost\Polaris" Name="CleanupExtraData" Type="string" Value="[AppDataPermafrost]" KeyPath="yes" />
<util:RemoveFolderEx On="uninstall" Property="EXTRADATAPATH" />
</Component>
</DirectoryRef>
<Feature Id='Complete' Level='1'>
<ComponentRef Id='MainExecutable' />
<ComponentRef Id='ProgramMenuDir' />
<ComponentRef Id='CleanupExtraData' />
<ComponentGroupRef Id="WebUI" />
</Feature>
<Icon Id='polaris.exe' SourceFile='polaris.exe' />
<Property Id='ARPPRODUCTICON' Value='polaris.exe' />
<Property Id='WIXUI_INSTALLDIR' Value='INSTALL_DIR' />
<UIRef Id='WixUI_Minimal' />
<WixVariable Id='WixUILicenseRtf' Value='license.rtf' />
<WixVariable Id='WixUIDialogBmp' Value='dialog.bmp' />
<WixVariable Id='WixUIBannerBmp' Value='banner.bmp' />
</Product>
</Wix>
<?xml version="1.0" encoding="windows-1252"?>
<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi" xmlns:util="http://schemas.microsoft.com/wix/UtilExtension">
<Product Name="Polaris" Id="*" UpgradeCode="FF16B075-1D36-47F4-BE37-D95BBC1A412C" Language="1033" Codepage="1252" Manufacturer="Permafrost" Version="0.12.3">
<Package Id="*" Keywords="Installer" Platform="x64" InstallScope="perUser" Description="Polaris Installer" Manufacturer="Permafrost" Languages="1033" Compressed="yes" SummaryCodepage="1252" />
<MajorUpgrade DowngradeErrorMessage="A newer version of Polaris is already installed." Schedule="afterInstallExecute" />
<Media Id="1" Cabinet="Sample.cab" EmbedCab="yes" DiskPrompt="Installation Media #1" />
<Property Id="DiskPrompt" Value="Polaris Installation [1]" />
<Directory Id="TARGETDIR" Name="SourceDir">
<Directory Id="LocalAppDataFolder">
<Directory Id="AppDataPermafrost" Name="Permafrost">
<Directory Id="AppDataPolaris" Name="Polaris" FileSource=".">
<Component Id="MainExecutable" Guid="*">
<File Source="polaris.exe" KeyPath="yes" Checksum="yes">
<Shortcut Id="StartupMenuPolaris" Directory="StartupFolder" Name="Polaris" WorkingDirectory="AppDataPolaris" Icon="polaris.exe" IconIndex="0" Advertise="yes" />
<Shortcut Id="StartMenuPolaris" Directory="ProgramMenuDir" Name="Polaris" WorkingDirectory="AppDataPolaris" Icon="polaris.exe" IconIndex="0" Advertise="yes" />
<Shortcut Id="DesktopPolaris" Directory="DesktopFolder" Name="Polaris" WorkingDirectory="AppDataPolaris" Icon="polaris.exe" IconIndex="0" Advertise="yes" />
</File>
</Component>
<Component Id="CLIExecutable" Guid="*">
<File Source="polaris-cli.exe" KeyPath="yes" Checksum="yes" />
</Component>
</Directory>
</Directory>
</Directory>
<Directory Id="DesktopFolder" Name="Desktop" />
<Directory Id="StartupFolder" Name="Startup" />
<Directory Id="ProgramMenuFolder" Name="Programs">
<Directory Id="ProgramMenuDir" Name="Permafrost">
<Component Id="ProgramMenuDir" Guid="*">
<RemoveFolder Id="ProgramMenuDir" On="uninstall" />
<RegistryValue Root="HKCU" Key="Software\Permafrost\Polaris" Name="ProgramMenuEntry" Type="string" Value="" KeyPath="yes" />
</Component>
</Directory>
</Directory>
</Directory>
<!--Remove extra files after uninstall (db, thumbnails, etc.)-->
<Property Id="EXTRADATAPATH">
<RegistrySearch Root="HKCU" Key="Software\Permafrost\Polaris" Name="CleanupExtraData" Type="raw" Id="ExtraDataPathSearch" />
</Property>
<DirectoryRef Id="AppDataPermafrost">
<Component Id="CleanupExtraData" Guid="DF415F12-A1B4-48EE-98BC-E0B75AF556AD">
<RegistryValue Root="HKCU" Key="Software\Permafrost\Polaris" Name="CleanupExtraData" Type="string" Value="[AppDataPermafrost]" KeyPath="yes" />
<util:RemoveFolderEx On="uninstall" Property="EXTRADATAPATH" />
</Component>
</DirectoryRef>
<Feature Id="Complete" Level="1">
<ComponentRef Id="MainExecutable" />
<ComponentRef Id="CLIExecutable" />
<ComponentRef Id="ProgramMenuDir" />
<ComponentRef Id="CleanupExtraData" />
<ComponentGroupRef Id="WebUI" />
</Feature>
<Icon Id="polaris.exe" SourceFile="polaris.exe" />
<Property Id="ARPPRODUCTICON" Value="polaris.exe" />
<Property Id="WIXUI_INSTALLDIR" Value="INSTALL_DIR" />
<UIRef Id="WixUI_Minimal" />
<WixVariable Id="WixUILicenseRtf" Value="license.rtf" />
<WixVariable Id="WixUIDialogBmp" Value="dialog.bmp" />
<WixVariable Id="WixUIBannerBmp" Value="banner.bmp" />
</Product>
</Wix>

View file

@ -0,0 +1,52 @@
# Windows release script: builds the Polaris UI and CLI executables and packages
# them into release\polaris.msi with the WiX toolset. Requires the
# POLARIS_VERSION and WIX environment variables to be set.
if (!(Test-Path env:POLARIS_VERSION)) {
throw "POLARIS_VERSION environment variable is not defined"
}
""
"Compiling executable"
# TODO: Uncomment the following once Polaris can do variable expansion of %LOCALAPPDATA%
# And remove the code setting these as defaults in `service/mod.rs`
# $script:INSTALL_DIR = "%LOCALAPPDATA%\Permafrost\Polaris"
# $env:POLARIS_WEB_DIR = "$INSTALL_DIR\web"
# $env:POLARIS_DB_DIR = "$INSTALL_DIR"
# $env:POLARIS_LOG_DIR = "$INSTALL_DIR"
# $env:POLARIS_CACHE_DIR = "$INSTALL_DIR"
# $env:POLARIS_PID_DIR = "$INSTALL_DIR"
# Build the windowed UI executable and the headless CLI executable separately.
cargo rustc --release --features "ui" -- -o ".\target\release\polaris.exe"
cargo rustc --release -- -o ".\target\release\polaris-cli.exe"
""
"Creating output directory"
New-Item .\release\tmp -type directory -Force | Out-Null
Remove-Item -Recurse .\release\tmp\*
""
"Copying to output directory"
Copy-Item .\res\windows\installer\license.rtf .\release\tmp\
Copy-Item .\res\windows\installer\banner.bmp .\release\tmp\
Copy-Item .\res\windows\installer\dialog.bmp .\release\tmp\
Copy-Item .\target\release\polaris.exe .\release\tmp\
Copy-Item .\target\release\polaris-cli.exe .\release\tmp\
Copy-Item .\web .\release\tmp\web -recurse
""
"Inserting version number in installer config"
# Rewrite the Product/@Version attribute of installer.wxs in place so the MSI
# carries the release version.
[xml]$wxs = Get-Content .\res\windows\installer\installer.wxs
$wxs.Wix.Product.SetAttribute("Version", $env:POLARIS_VERSION)
$wxs.Save('.\res\windows\installer\installer.wxs')
""
"Creating installer"
# heat harvests the web UI directory into a WiX fragment; candle compiles the
# .wxs sources; light links everything into the final MSI.
$heat_exe = Join-Path $env:WIX bin\heat.exe
& $heat_exe dir .\release\tmp\web\ -ag -g1 -dr AppDataPolaris -cg WebUI -sfrag -var wix.WebUIDir -out .\release\tmp\web_ui_fragment.wxs
$candle_exe = Join-Path $env:WIX bin\candle.exe
& $candle_exe -wx -ext WixUtilExtension -arch x64 -out .\release\tmp\web_ui_fragment.wixobj .\release\tmp\web_ui_fragment.wxs
& $candle_exe -wx -ext WixUtilExtension -arch x64 -out .\release\tmp\installer.wixobj .\res\windows\installer\installer.wxs
$light_exe = Join-Path $env:WIX bin\light.exe
& $light_exe -dWebUIDir=".\release\tmp\web" -wx -ext WixUtilExtension -ext WixUIExtension -spdb -sw1076 -sice:ICE38 -sice:ICE64 -out .\release\polaris.msi .\release\tmp\installer.wixobj .\release\tmp\web_ui_fragment.wixobj
"Cleaning up"
Remove-Item -Recurse .\release\tmp

4
rust-toolchain.toml Normal file
View file

@ -0,0 +1,4 @@
# Pins the Rust toolchain used for local builds and CI.
[toolchain]
channel = "stable"
# rust-src enables source-aware tooling; rustfmt enforces formatting.
components = [ "rust-src", "rustfmt" ]
profile = "default"

View file

@ -1,611 +0,0 @@
use diesel::prelude::*;
use iron::prelude::*;
use iron::headers::{Authorization, Basic, Range};
use iron::{AroundMiddleware, Handler, status};
use mount::Mount;
use router::Router;
use params;
use secure_session::middleware::{SessionMiddleware, SessionConfig};
use secure_session::session::{SessionManager, ChaCha20Poly1305SessionManager};
use serde_json;
use std::fs;
use std::io;
use std::path::*;
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::Sender;
use typemap;
use url::percent_encoding::percent_decode;
use config;
use config::MiscSettings;
use db::{ConnectionSource, DB};
use db::misc_settings;
use errors::*;
use index;
use playlist;
use user;
use serve;
use thumbnails::*;
use utils::*;
use vfs::VFSSource;
// API version advertised by the `/version` endpoint; clients use this to
// detect incompatible servers.
const CURRENT_MAJOR_VERSION: i32 = 2;
const CURRENT_MINOR_VERSION: i32 = 1;
/// Per-request session payload, persisted by the secure_session middleware.
#[derive(Deserialize, Serialize)]
struct Session {
	username: String,
}

/// Marker type used to store the `Session` in Iron's request typemap.
struct SessionKey {}

impl typemap::Key for SessionKey {
	type Value = Session;
}
/// Reads the session-signing secret from the `misc_settings` table.
/// `get_result` expects exactly one row; anything else is an error.
fn get_auth_secret<T>(db: &T) -> Result<String>
	where T: ConnectionSource
{
	use self::misc_settings::dsl::*;
	let connection = db.get_connection();
	let misc: MiscSettings = misc_settings.get_result(connection.deref())?;
	Ok(misc.auth_secret.to_owned())
}
/// Builds the top-level Iron handler: every API endpoint wrapped in an
/// encrypted-cookie session middleware keyed on the database-stored secret.
pub fn get_handler(db: Arc<DB>, index: Arc<Mutex<Sender<index::Command>>>) -> Result<Chain> {
	let api_handler = get_endpoints(db.clone(), index);
	let mut api_chain = Chain::new(api_handler);

	// Sessions are sealed with ChaCha20-Poly1305 using the persisted secret.
	let auth_secret = get_auth_secret(db.deref())?;
	let session_manager =
		ChaCha20Poly1305SessionManager::<Session>::from_password(auth_secret.as_bytes());
	let session_config = SessionConfig::default();
	let session_middleware =
		SessionMiddleware::<Session,
		                    SessionKey,
		                    ChaCha20Poly1305SessionManager<Session>>::new(session_manager,
		                                                                  session_config);
	api_chain.link_around(session_middleware);
	Ok(api_chain)
}
/// Assembles the API route tree in three tiers: unauthenticated
/// (`/version`, `/auth`, `/initial_setup`), authenticated (everything
/// mounted behind `AuthRequirement`), and admin-only (`/settings`,
/// `/trigger_index`, additionally wrapped in `AdminRequirement`).
fn get_endpoints(db: Arc<DB>, index_channel: Arc<Mutex<Sender<index::Command>>>) -> Mount {
	let mut api_handler = Mount::new();

	{
		// Public endpoints — reachable without authentication.
		api_handler.mount("/version/", self::version);
		{
			let db = db.clone();
			api_handler.mount("/auth/",
			                  move |request: &mut Request| self::auth(request, db.deref()));
		}
		{
			let db = db.clone();
			api_handler.mount("/initial_setup/", move |request: &mut Request| {
				self::initial_setup(request, db.deref())
			});
		}
	}

	{
		// Authenticated endpoints.
		let mut auth_api_mount = Mount::new();
		{
			let db = db.clone();
			auth_api_mount.mount("/browse/",
			                     move |request: &mut Request| self::browse(request, db.deref()));
		}
		{
			let db = db.clone();
			auth_api_mount.mount("/flatten/",
			                     move |request: &mut Request| self::flatten(request, db.deref()));
		}
		{
			let db = db.clone();
			auth_api_mount.mount("/random/",
			                     move |request: &mut Request| self::random(request, db.deref()));
		}
		{
			let db = db.clone();
			auth_api_mount.mount("/recent/",
			                     move |request: &mut Request| self::recent(request, db.deref()));
		}
		{
			let db = db.clone();
			auth_api_mount.mount("/serve/",
			                     move |request: &mut Request| self::serve(request, db.deref()));
		}
		{
			// Settings are read/written through a Router so GET and PUT can
			// dispatch to different handlers; admin-only.
			let mut settings_router = Router::new();
			let get_db = db.clone();
			let put_db = db.clone();
			settings_router.get("/",
			                    move |request: &mut Request| {
				                    self::get_config(request, get_db.deref())
				                   },
			                    "get_config");
			settings_router.put("/",
			                    move |request: &mut Request| {
				                    self::put_config(request, put_db.deref())
				                   },
			                    "put_config");

			let mut settings_api_chain = Chain::new(settings_router);
			let admin_req = AdminRequirement { db: db.clone() };
			settings_api_chain.link_around(admin_req);

			auth_api_mount.mount("/settings/", settings_api_chain);
		}
		{
			// Admin-only: wake the indexing thread.
			let index_channel = index_channel.clone();
			let mut reindex_router = Router::new();
			reindex_router.post("/",
			                    move |_: &mut Request| self::trigger_index(index_channel.deref()),
			                    "trigger_index");

			let mut reindex_api_chain = Chain::new(reindex_router);
			let admin_req = AdminRequirement { db: db.clone() };
			reindex_api_chain.link_around(admin_req);

			auth_api_mount.mount("/trigger_index/", reindex_api_chain);
		}
		{
			// Playlist CRUD; each route gets its own DB handle clone because
			// every closure must own its captures.
			let mut playlist_router = Router::new();
			let put_db = db.clone();
			let list_db = db.clone();
			let read_db = db.clone();
			let delete_db = db.clone();
			playlist_router.put("/",
			                    move |request: &mut Request| {
				                    self::save_playlist(request, put_db.deref())
				                   },
			                    "save_playlist");
			playlist_router.get("/list",
			                    move |request: &mut Request| {
				                    self::list_playlists(request, list_db.deref())
				                   },
			                    "list_playlists");
			playlist_router.get("/read/:playlist_name",
			                    move |request: &mut Request| {
				                    self::read_playlist(request, read_db.deref())
				                   },
			                    "read_playlist");
			playlist_router.delete("/:playlist_name",
			                       move |request: &mut Request| {
				                       self::delete_playlist(request, delete_db.deref())
				                      },
			                       "delete_playlist");
			auth_api_mount.mount("/playlist/", playlist_router);
		}

		// Gate the whole mount behind authentication.
		let mut auth_api_chain = Chain::new(auth_api_mount);
		let auth = AuthRequirement { db: db.clone() };
		auth_api_chain.link_around(auth);

		api_handler.mount("/", auth_api_chain);
	}
	api_handler
}
/// Rebuilds a filesystem path from the remainder of the request URL:
/// joins the URL segments with the platform separator, then percent-decodes
/// the result as UTF-8.
fn path_from_request(request: &Request) -> Result<PathBuf> {
	let path_string = request
		.url
		.path()
		.join(&::std::path::MAIN_SEPARATOR.to_string());
	let decoded_path = percent_decode(path_string.as_bytes()).decode_utf8()?;
	Ok(PathBuf::from(decoded_path.deref()))
}
/// AroundMiddleware that wraps a handler in authentication enforcement
/// (see `AuthHandler`).
struct AuthRequirement {
	db: Arc<DB>,
}

impl AroundMiddleware for AuthRequirement {
	fn around(self, handler: Box<Handler>) -> Box<Handler> {
		Box::new(AuthHandler {
		         	db: self.db,
		         	handler: handler,
		         }) as Box<Handler>
	}
}
/// Iron handler that rejects unauthenticated requests before delegating to
/// the wrapped handler. Accepts, in order: the first-time-setup bypass
/// (no users exist yet), HTTP Basic credentials, or an existing session.
struct AuthHandler {
	handler: Box<Handler>,
	db: Arc<DB>,
}

impl Handler for AuthHandler {
	fn handle(&self, req: &mut Request) -> IronResult<Response> {
		{
			let mut auth_success = false;

			// Skip auth for first time setup
			if user::count(self.db.deref())? == 0 {
				auth_success = true;
			}

			// Auth via Authorization header
			if !auth_success {
				if let Some(auth) = req.headers.get::<Authorization<Basic>>() {
					if let Some(ref password) = auth.password {
						auth_success =
							user::auth(self.db.deref(), auth.username.as_str(), password.as_str())?;
						// Only establish a session for valid credentials.
						// Previously the session was inserted unconditionally,
						// so the session check below would authenticate a
						// request whose Basic credentials had just failed.
						if auth_success {
							req.extensions
								.insert::<SessionKey>(Session { username: auth.username.clone() });
						}
					}
				}
			}

			// Auth via Session
			if !auth_success {
				auth_success = req.extensions.get::<SessionKey>().is_some();
			}

			// Reject
			if !auth_success {
				return Err(Error::from(ErrorKind::AuthenticationRequired).into());
			}
		}
		self.handler.handle(req)
	}
}
/// AroundMiddleware that wraps a handler in admin-privilege enforcement
/// (see `AdminHandler`).
struct AdminRequirement {
	db: Arc<DB>,
}

impl AroundMiddleware for AdminRequirement {
	fn around(self, handler: Box<Handler>) -> Box<Handler> {
		Box::new(AdminHandler {
		         	db: self.db,
		         	handler: handler,
		         }) as Box<Handler>
	}
}
/// Iron handler that rejects requests from non-admin users before
/// delegating to the wrapped handler.
struct AdminHandler {
	handler: Box<Handler>,
	db: Arc<DB>,
}

impl Handler for AdminHandler {
	fn handle(&self, req: &mut Request) -> IronResult<Response> {
		{
			let mut auth_success = false;

			// Skip auth for first time setup
			if user::count(self.db.deref())? == 0 {
				auth_success = true;
			}

			// Otherwise require an authenticated session owned by an admin;
			// no session at all means the caller never authenticated.
			if !auth_success {
				match req.extensions.get::<SessionKey>() {
					Some(s) => auth_success = user::is_admin(self.db.deref(), &s.username)?,
					_ => return Err(Error::from(ErrorKind::AuthenticationRequired).into()),
				}
			}

			if !auth_success {
				return Err(Error::from(ErrorKind::AdminPrivilegeRequired).into());
			}
		}
		self.handler.handle(req)
	}
}
/// GET /version — reports the API major/minor version as JSON so clients
/// can detect incompatible servers.
fn version(_: &mut Request) -> IronResult<Response> {
	#[derive(Serialize)]
	struct Version {
		major: i32,
		minor: i32,
	}

	let version = Version {
		major: CURRENT_MAJOR_VERSION,
		minor: CURRENT_MINOR_VERSION,
	};

	serde_json::to_string(&version)
		.map(|json| Response::with((status::Ok, json)))
		.map_err(|e| IronError::new(e, status::InternalServerError))
}
fn initial_setup(_: &mut Request, db: &DB) -> IronResult<Response> {
#[derive(Serialize)]
struct InitialSetup {
has_any_users: bool,
};
let initial_setup = InitialSetup { has_any_users: user::count(db)? > 0 };
match serde_json::to_string(&initial_setup) {
Ok(result_json) => Ok(Response::with((status::Ok, result_json))),
Err(e) => Err(IronError::new(e, status::InternalServerError)),
}
}
/// POST /auth — validates the `username`/`password` form parameters, opens
/// a session on success, and reports whether the user is an admin.
fn auth(request: &mut Request, db: &DB) -> IronResult<Response> {
	let username;
	let password;
	{
		// Scope the params borrow so request.extensions can be mutated below.
		let input = request.get_ref::<params::Params>().unwrap();
		username = match input.find(&["username"]) {
			Some(&params::Value::String(ref username)) => username.clone(),
			_ => return Err(Error::from(ErrorKind::MissingUsername).into()),
		};
		password = match input.find(&["password"]) {
			Some(&params::Value::String(ref password)) => password.clone(),
			_ => return Err(Error::from(ErrorKind::MissingPassword).into()),
		};
	}

	if !user::auth(db, username.as_str(), password.as_str())? {
		return Err(Error::from(ErrorKind::IncorrectCredentials).into());
	}

	// The session middleware persists whatever is stored under SessionKey.
	request
		.extensions
		.insert::<SessionKey>(Session { username: username.clone() });

	#[derive(Serialize)]
	struct AuthOutput {
		admin: bool,
	}
	let auth_output = AuthOutput { admin: user::is_admin(db.deref(), &username)? };
	let result_json = serde_json::to_string(&auth_output);
	let result_json = match result_json {
		Ok(j) => j,
		Err(e) => return Err(IronError::new(e, status::InternalServerError)),
	};

	Ok(Response::with((status::Ok, result_json)))
}
/// GET /browse — lists the direct children of a virtual directory as JSON.
fn browse(request: &mut Request, db: &DB) -> IronResult<Response> {
	// A path that fails to decode is a client error.
	let path = path_from_request(request).map_err(|e| IronError::new(e, status::BadRequest))?;
	let listing = index::browse(db, &path)?;
	serde_json::to_string(&listing)
		.map(|json| Response::with((status::Ok, json)))
		.map_err(|e| IronError::new(e, status::InternalServerError))
}
/// GET /flatten — lists every song under a virtual directory, recursively,
/// as JSON.
fn flatten(request: &mut Request, db: &DB) -> IronResult<Response> {
	// A path that fails to decode is a client error.
	let path = path_from_request(request).map_err(|e| IronError::new(e, status::BadRequest))?;
	let songs = index::flatten(db, &path)?;
	serde_json::to_string(&songs)
		.map(|json| Response::with((status::Ok, json)))
		.map_err(|e| IronError::new(e, status::InternalServerError))
}
/// GET /random — returns 20 randomly picked albums as JSON.
fn random(_: &mut Request, db: &DB) -> IronResult<Response> {
	let albums = index::get_random_albums(db, 20)?;
	serde_json::to_string(&albums)
		.map(|json| Response::with((status::Ok, json)))
		.map_err(|e| IronError::new(e, status::InternalServerError))
}
/// GET /recent — returns the 20 most recently added albums as JSON.
fn recent(_: &mut Request, db: &DB) -> IronResult<Response> {
	let albums = index::get_recent_albums(db, 20)?;
	serde_json::to_string(&albums)
		.map(|json| Response::with((status::Ok, json)))
		.map_err(|e| IronError::new(e, status::InternalServerError))
}
/// GET /serve — resolves a virtual path to a real file and serves it:
/// songs honor HTTP Range requests, images are served as thumbnails, and
/// anything else is rejected.
fn serve(request: &mut Request, db: &DB) -> IronResult<Response> {
	let virtual_path = path_from_request(request);
	let virtual_path = match virtual_path {
		Err(e) => return Err(IronError::new(e, status::BadRequest)),
		Ok(p) => p,
	};

	// Map through the virtual filesystem; an unmapped path is a 404.
	let vfs = db.get_vfs()?;
	let real_path = vfs.virtual_to_real(&virtual_path);
	let real_path = match real_path {
		Err(e) => return Err(IronError::new(e, status::NotFound)),
		Ok(p) => p,
	};

	// Translate filesystem errors into matching HTTP statuses.
	let metadata = match fs::metadata(real_path.as_path()) {
		Ok(meta) => meta,
		Err(e) => {
			let status = match e.kind() {
				io::ErrorKind::NotFound => status::NotFound,
				io::ErrorKind::PermissionDenied => status::Forbidden,
				_ => status::InternalServerError,
			};
			return Err(IronError::new(e, status));
		}
	};

	if !metadata.is_file() {
		return Err(Error::from(ErrorKind::CannotServeDirectory).into());
	}

	if is_song(real_path.as_path()) {
		let range_header = request.headers.get::<Range>();
		return serve::deliver(&real_path, range_header);
	}

	if is_image(real_path.as_path()) {
		return art(request, real_path.as_path());
	}

	Err(Error::from(ErrorKind::UnsupportedFileType).into())
}
/// Serves a 400px thumbnail generated from the image file at `real_path`.
fn art(_: &mut Request, real_path: &Path) -> IronResult<Response> {
	get_thumbnail(real_path, 400)
		.map(|thumbnail_path| Response::with((status::Ok, thumbnail_path)))
		.map_err(IronError::from)
}
/// GET /settings — returns the stored configuration as JSON. Admin-only
/// (enforced by the surrounding middleware).
fn get_config(_: &mut Request, db: &DB) -> IronResult<Response> {
	let settings = config::read(db)?;
	serde_json::to_string(&settings)
		.map(|json| Response::with((status::Ok, json)))
		.map_err(|e| IronError::new(e, status::InternalServerError))
}
/// PUT /settings — parses the `config` form parameter as JSON and merges it
/// into the stored configuration. Admin-only (enforced by the surrounding
/// middleware).
fn put_config(request: &mut Request, db: &DB) -> IronResult<Response> {
	let input = request.get_ref::<params::Params>().unwrap();
	let config = match input.find(&["config"]) {
		Some(&params::Value::String(ref config)) => config,
		_ => return Err(Error::from(ErrorKind::MissingConfig).into()),
	};
	let config = config::parse_json(config)?;
	config::amend(db, &config)?;
	Ok(Response::with(status::Ok))
}
/// POST /trigger_index — asks the indexing thread to rescan the collection.
/// Admin-only (enforced by the surrounding middleware).
fn trigger_index(channel: &Mutex<Sender<index::Command>>) -> IronResult<Response> {
	// A poisoned mutex is unrecoverable here, hence the unwrap.
	let sender = channel.lock().unwrap();
	match sender.send(index::Command::REINDEX) {
		Ok(()) => Ok(Response::with(status::Ok)),
		Err(e) => Err(IronError::new(e, status::InternalServerError)),
	}
}
/// PUT /playlist — upserts a playlist owned by the session user. The
/// `playlist` form parameter carries a JSON object with `name` and `tracks`.
fn save_playlist(request: &mut Request, db: &DB) -> IronResult<Response> {
	let username = match request.extensions.get::<SessionKey>() {
		Some(s) => s.username.clone(),
		None => return Err(Error::from(ErrorKind::AuthenticationRequired).into()),
	};

	let input = request.get_ref::<params::Params>().unwrap();
	let playlist = match input.find(&["playlist"]) {
		Some(&params::Value::String(ref playlist)) => playlist,
		_ => return Err(Error::from(ErrorKind::MissingPlaylist).into()),
	};

	// Expected shape of the JSON payload.
	#[derive(Deserialize)]
	struct SavePlaylistInput {
		name: String,
		tracks: Vec<String>,
	}
	let playlist = match serde_json::from_str::<SavePlaylistInput>(playlist) {
		Ok(p) => p,
		Err(e) => return Err(IronError::new(e, status::BadRequest)),
	};

	playlist::save_playlist(&playlist.name, &username, &playlist.tracks, db)?;
	Ok(Response::with(status::Ok))
}
/// GET /playlist/list — returns the names of the session user's playlists
/// as a JSON array of `{ name }` objects.
fn list_playlists(request: &mut Request, db: &DB) -> IronResult<Response> {
	let username = match request.extensions.get::<SessionKey>() {
		Some(s) => s.username.clone(),
		None => return Err(Error::from(ErrorKind::AuthenticationRequired).into()),
	};

	#[derive(Serialize)]
	struct ListPlaylistsOutput {
		name: String,
	}

	let playlist_name = playlist::list_playlists(&username, db)?;
	let playlists: Vec<ListPlaylistsOutput> = playlist_name
		.into_iter()
		.map(|p| ListPlaylistsOutput { name: p })
		.collect();

	let result_json = serde_json::to_string(&playlists);
	let result_json = match result_json {
		Ok(j) => j,
		Err(e) => return Err(IronError::new(e, status::InternalServerError)),
	};

	Ok(Response::with((status::Ok, result_json)))
}
/// GET /playlist/read/:playlist_name — returns the songs of one of the
/// session user's playlists. The name is percent-decoded from the URL.
fn read_playlist(request: &mut Request, db: &DB) -> IronResult<Response> {
	let username = match request.extensions.get::<SessionKey>() {
		Some(s) => s.username.clone(),
		None => return Err(Error::from(ErrorKind::AuthenticationRequired).into()),
	};

	let params = request.extensions.get::<Router>().unwrap();
	let ref playlist_name = match params.find("playlist_name") {
		Some(s) => s,
		_ => return Err(Error::from(ErrorKind::MissingPlaylistName).into()),
	};
	let playlist_name = match percent_decode(playlist_name.as_bytes()).decode_utf8() {
		Ok(s) => s,
		Err(_) => return Err(Error::from(ErrorKind::EncodingError).into()),
	};

	let songs = playlist::read_playlist(&playlist_name, &username, db)?;
	let result_json = serde_json::to_string(&songs);
	let result_json = match result_json {
		Ok(j) => j,
		Err(e) => return Err(IronError::new(e, status::InternalServerError)),
	};

	Ok(Response::with((status::Ok, result_json)))
}
/// DELETE /playlist/:playlist_name — deletes one of the session user's
/// playlists. The name is percent-decoded from the URL.
fn delete_playlist(request: &mut Request, db: &DB) -> IronResult<Response> {
	let username = match request.extensions.get::<SessionKey>() {
		Some(s) => s.username.clone(),
		None => return Err(Error::from(ErrorKind::AuthenticationRequired).into()),
	};

	let params = request.extensions.get::<Router>().unwrap();
	let ref playlist_name = match params.find("playlist_name") {
		Some(s) => s,
		_ => return Err(Error::from(ErrorKind::MissingPlaylistName).into()),
	};
	let playlist_name = match percent_decode(playlist_name.as_bytes()).decode_utf8() {
		Ok(s) => s,
		Err(_) => return Err(Error::from(ErrorKind::EncodingError).into()),
	};

	playlist::delete_playlist(&playlist_name, &username, db)?;
	Ok(Response::with(status::Ok))
}

317
src/app.rs Normal file
View file

@ -0,0 +1,317 @@
use std::fs;
use std::path::{Path, PathBuf};
use log::info;
use rand::rngs::OsRng;
use rand::RngCore;
use tokio::fs::try_exists;
use tokio::task::spawn_blocking;
use crate::app::legacy::*;
use crate::paths::Paths;
pub mod auth;
pub mod config;
pub mod ddns;
pub mod formats;
pub mod index;
pub mod legacy;
pub mod ndb;
pub mod peaks;
pub mod playlist;
pub mod scanner;
pub mod thumbnail;
#[cfg(test)]
pub mod test;
/// Application-wide error type, aggregating failures from threading, IO,
/// databases, tag/metadata parsers, media decoding, configuration, and
/// authentication.
#[derive(thiserror::Error, Debug)]
pub enum Error {
	// --- Runtime / IO / storage ---
	#[error(transparent)]
	ThreadPoolBuilder(#[from] rayon::ThreadPoolBuildError),
	#[error(transparent)]
	ThreadJoining(#[from] tokio::task::JoinError),
	#[error("Filesystem error for `{0}`: `{1}`")]
	Io(PathBuf, std::io::Error),
	#[error(transparent)]
	FileWatch(#[from] notify::Error),
	#[error(transparent)]
	SQL(#[from] rusqlite::Error),
	// --- Tag / metadata parsers, tied to the offending file where possible ---
	#[error(transparent)]
	Ape(#[from] ape::Error),
	#[error("ID3 error in `{0}`: `{1}`")]
	Id3(PathBuf, id3::Error),
	#[error("Metaflac error in `{0}`: `{1}`")]
	Metaflac(PathBuf, metaflac::Error),
	#[error("Mp4aMeta error in `{0}`: `{1}`")]
	Mp4aMeta(PathBuf, mp4ameta::Error),
	#[error(transparent)]
	Opus(#[from] opus_headers::ParseError),
	#[error(transparent)]
	Vorbis(#[from] lewton::VorbisError),
	#[error("Could not find a Vorbis comment within flac file")]
	VorbisCommentNotFoundInFlacFile,
	#[error("Could not read thumbnail image in `{0}`:\n\n{1}")]
	Image(PathBuf, image::error::ImageError),
	#[error("This file format is not supported: {0}")]
	UnsupportedFormat(&'static str),
	// --- Audio decoding (symphonia) ---
	#[error("No tracks found in audio file: {0}")]
	MediaEmpty(PathBuf),
	// NOTE(review): MediaDecodeError and MediaDecoderError wrap the same
	// symphonia error type and look like an accidental duplicate — confirm
	// both variants are actually constructed somewhere.
	#[error(transparent)]
	MediaDecodeError(symphonia::core::errors::Error),
	#[error(transparent)]
	MediaDecoderError(symphonia::core::errors::Error),
	#[error(transparent)]
	MediaPacketError(symphonia::core::errors::Error),
	#[error(transparent)]
	MediaProbeError(symphonia::core::errors::Error),
	// --- Waveform peaks serialization ---
	#[error(transparent)]
	PeaksSerialization(bitcode::Error),
	#[error(transparent)]
	PeaksDeserialization(bitcode::Error),
	#[error(transparent)]
	NativeDatabase(#[from] native_db::db_type::Error),
	#[error("Could not initialize database")]
	NativeDatabaseCreationError(native_db::db_type::Error),
	// --- Dynamic DNS updates ---
	#[error("DDNS update query failed with HTTP status code `{0}`")]
	UpdateQueryFailed(u16),
	#[error("DDNS update query failed due to a transport error")]
	UpdateQueryTransport,
	// --- Configuration ---
	#[error("Auth secret does not have the expected format")]
	AuthenticationSecretInvalid,
	#[error("Missing auth secret")]
	AuthenticationSecretNotFound,
	#[error("Missing settings")]
	MiscSettingsNotFound,
	#[error("Index album art pattern is not a valid regex")]
	IndexAlbumArtPatternInvalid,
	#[error("DDNS update URL is invalid")]
	DDNSUpdateURLInvalid,
	#[error("Could not deserialize configuration: `{0}`")]
	ConfigDeserialization(toml::de::Error),
	#[error("Could not serialize configuration: `{0}`")]
	ConfigSerialization(toml::ser::Error),
	#[error("Could not deserialize collection")]
	IndexDeserializationError,
	#[error("Could not serialize collection")]
	IndexSerializationError,
	// --- Virtual filesystem mapping ---
	#[error("Invalid Directory")]
	InvalidDirectory(String),
	#[error("The following virtual path could not be mapped to a real path: `{0}`")]
	CouldNotMapToRealPath(PathBuf),
	#[error("The following real path could not be mapped to a virtual path: `{0}`")]
	CouldNotMapToVirtualPath(PathBuf),
	// --- Collection lookups ---
	#[error("User not found")]
	UserNotFound,
	#[error("Directory not found: {0}")]
	DirectoryNotFound(PathBuf),
	#[error("Artist not found")]
	ArtistNotFound,
	#[error("Album not found")]
	AlbumNotFound,
	#[error("Genre not found")]
	GenreNotFound,
	#[error("Song not found")]
	SongNotFound,
	#[error("Invalid search query syntax")]
	SearchQueryParseError,
	#[error("Playlist not found")]
	PlaylistNotFound,
	#[error("No embedded artwork was found in `{0}`")]
	EmbeddedArtworkNotFound(PathBuf),
	// --- User management / authentication ---
	#[error("Cannot use empty username")]
	EmptyUsername,
	#[error("Cannot use empty password")]
	EmptyPassword,
	#[error("Username already exists")]
	DuplicateUsername,
	#[error("Username does not exist")]
	IncorrectUsername,
	#[error("Password does not match username")]
	IncorrectPassword,
	#[error("Invalid auth token")]
	InvalidAuthToken,
	#[error("Incorrect authorization scope")]
	IncorrectAuthorizationScope,
	#[error("Failed to hash password")]
	PasswordHashing,
	#[error("Failed to encode authorization token")]
	AuthorizationTokenEncoding,
	#[error("Failed to encode Branca token")]
	BrancaTokenEncoding,
}
/// Top-level application state: the HTTP port, the static web assets
/// location, and a handle to each subsystem manager. Derives `Clone` so it
/// can be handed to request handlers; the managers are presumably shared
/// handles rather than deep copies — see their definitions.
#[derive(Clone)]
pub struct App {
	pub port: u16,
	pub web_dir_path: PathBuf,
	pub ddns_manager: ddns::Manager,
	pub scanner: scanner::Scanner,
	pub index_manager: index::Manager,
	pub config_manager: config::Manager,
	pub peaks_manager: peaks::Manager,
	pub playlist_manager: playlist::Manager,
	pub thumbnail_manager: thumbnail::Manager,
}
impl App {
	/// Bootstraps the application: creates the data/cache directories,
	/// loads or generates the auth secret, constructs all subsystem
	/// managers, then migrates any legacy SQLite database (auth secret,
	/// config, playlists) before deleting it.
	pub async fn new(port: u16, paths: Paths) -> Result<Self, Error> {
		fs::create_dir_all(&paths.data_dir_path)
			.map_err(|e| Error::Io(paths.data_dir_path.clone(), e))?;

		fs::create_dir_all(&paths.web_dir_path)
			.map_err(|e| Error::Io(paths.web_dir_path.clone(), e))?;

		let peaks_dir_path = paths.cache_dir_path.join("peaks");
		fs::create_dir_all(&peaks_dir_path).map_err(|e| Error::Io(peaks_dir_path.clone(), e))?;

		let thumbnails_dir_path = paths.cache_dir_path.join("thumbnails");
		fs::create_dir_all(&thumbnails_dir_path)
			.map_err(|e| Error::Io(thumbnails_dir_path.clone(), e))?;

		// The auth secret must exist before the config manager is built,
		// since the config manager owns token issuance.
		let auth_secret_file_path = paths.data_dir_path.join("auth.secret");
		Self::migrate_legacy_auth_secret(&paths.db_file_path, &auth_secret_file_path).await?;
		let auth_secret = Self::get_or_create_auth_secret(&auth_secret_file_path).await?;

		let config_manager = config::Manager::new(&paths.config_file_path, auth_secret).await?;
		let ddns_manager = ddns::Manager::new(config_manager.clone());
		let ndb_manager = ndb::Manager::new(&paths.data_dir_path)?;
		let index_manager = index::Manager::new(&paths.data_dir_path).await?;
		let scanner = scanner::Scanner::new(index_manager.clone(), config_manager.clone()).await?;
		let peaks_manager = peaks::Manager::new(peaks_dir_path);
		let playlist_manager = playlist::Manager::new(ndb_manager);
		let thumbnail_manager = thumbnail::Manager::new(thumbnails_dir_path);

		let app = Self {
			port,
			web_dir_path: paths.web_dir_path,
			ddns_manager,
			scanner,
			index_manager,
			config_manager,
			peaks_manager,
			playlist_manager,
			thumbnail_manager,
		};

		// Config/playlist migration needs a fully constructed App.
		app.migrate_legacy_db(&paths.db_file_path).await?;

		Ok(app)
	}

	// Copies the auth secret out of a legacy SQLite database into its own
	// file. No-op when there is no legacy database, or when the secret file
	// already exists (a previous migration completed).
	async fn migrate_legacy_auth_secret(
		db_file_path: &PathBuf,
		secret_file_path: &PathBuf,
	) -> Result<(), Error> {
		if !try_exists(db_file_path)
			.await
			.map_err(|e| Error::Io(db_file_path.clone(), e))?
		{
			return Ok(());
		}

		if try_exists(secret_file_path)
			.await
			.map_err(|e| Error::Io(secret_file_path.clone(), e))?
		{
			return Ok(());
		}

		info!(
			"Migrating auth secret from database at `{}`",
			db_file_path.to_string_lossy()
		);

		// The legacy read is blocking; keep it off the async executor.
		let secret = spawn_blocking({
			let db_file_path = db_file_path.clone();
			move || read_legacy_auth_secret(&db_file_path)
		})
		.await??;

		tokio::fs::write(secret_file_path, &secret)
			.await
			.map_err(|e| Error::Io(secret_file_path.clone(), e))?;

		Ok(())
	}

	// Imports config and playlists from a legacy SQLite database, then
	// deletes it. No-op when the database is absent or contains no usable
	// config.
	async fn migrate_legacy_db(&self, db_file_path: &PathBuf) -> Result<(), Error> {
		if !try_exists(db_file_path)
			.await
			.map_err(|e| Error::Io(db_file_path.clone(), e))?
		{
			return Ok(());
		}

		let Some(config) = tokio::task::spawn_blocking({
			let db_file_path = db_file_path.clone();
			move || read_legacy_config(&db_file_path)
		})
		.await??
		else {
			return Ok(());
		};

		info!(
			"Found usable config in legacy database at `{}`, beginning migration process",
			db_file_path.to_string_lossy()
		);

		info!("Migrating configuration");
		self.config_manager.apply_config(config).await?;
		self.config_manager.save_config().await?;

		info!("Migrating playlists");
		for (name, owner, songs) in read_legacy_playlists(
			db_file_path,
			self.index_manager.clone(),
			self.scanner.clone(),
		)
		.await?
		{
			self.playlist_manager
				.save_playlist(&name, &owner, songs)
				.await?;
		}

		info!(
			"Deleting legacy database at `{}`",
			db_file_path.to_string_lossy()
		);
		delete_legacy_db(db_file_path).await?;

		info!(
			"Completed migration from `{}`",
			db_file_path.to_string_lossy()
		);

		Ok(())
	}

	// Reads the 32-byte auth secret from disk, generating and persisting a
	// random one on first run.
	async fn get_or_create_auth_secret(path: &Path) -> Result<auth::Secret, Error> {
		match tokio::fs::read(&path).await {
			Ok(s) => Ok(auth::Secret(
				// Reject files that are not exactly 32 bytes long.
				s.try_into()
					.map_err(|_| Error::AuthenticationSecretInvalid)?,
			)),
			Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
				let mut secret = auth::Secret::default();
				OsRng.fill_bytes(secret.as_mut());
				// NOTE(review): a failed write surfaces as
				// AuthenticationSecretInvalid rather than Io — confirm intended.
				tokio::fs::write(&path, &secret)
					.await
					.map_err(|_| Error::AuthenticationSecretInvalid)?;
				Ok(secret)
			}
			Err(e) => return Err(Error::Io(path.to_owned(), e)),
		}
	}
}

95
src/app/auth.rs Normal file
View file

@ -0,0 +1,95 @@
use std::time::{SystemTime, UNIX_EPOCH};
use pbkdf2::password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString};
use pbkdf2::Pbkdf2;
use rand::rngs::OsRng;
use serde::{Deserialize, Serialize};
use crate::app::Error;
/// 32-byte key used to seal and verify authentication tokens.
///
/// `Default` yields an all-zero placeholder; callers fill it with random
/// bytes (via `as_mut`) before persisting it.
#[derive(Clone, Default)]
pub struct Secret(pub [u8; 32]);

impl AsRef<[u8]> for Secret {
	/// Borrows the raw key bytes.
	fn as_ref(&self) -> &[u8] {
		&self.0[..]
	}
}

impl AsMut<[u8]> for Secret {
	/// Mutably borrows the raw key bytes, e.g. for filling with random data.
	fn as_mut(&mut self) -> &mut [u8] {
		&mut self.0[..]
	}
}
/// An encoded authentication token, as produced by `generate_auth_token`.
#[derive(Debug)]
pub struct Token(pub String);

/// What a token grants access to; currently only full API authentication.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum Scope {
	PolarisAuth,
}

/// Payload sealed inside an auth token: who it identifies and its scope.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub struct Authorization {
	pub username: String,
	pub scope: Scope,
}
pub fn hash_password(password: &str) -> Result<String, Error> {
if password.is_empty() {
return Err(Error::EmptyPassword);
}
let salt = SaltString::generate(&mut OsRng);
match Pbkdf2.hash_password(password.as_bytes(), &salt) {
Ok(h) => Ok(h.to_string()),
Err(_) => Err(Error::PasswordHashing),
}
}
/// Checks `attempted_password` against a stored PHC-format hash.
/// Returns false for malformed hashes as well as mismatches.
pub fn verify_password(password_hash: &str, attempted_password: &str) -> bool {
	PasswordHash::new(password_hash)
		.map(|parsed| {
			Pbkdf2
				.verify_password(attempted_password.as_bytes(), &parsed)
				.is_ok()
		})
		.unwrap_or(false)
}
/// Serializes `authorization` to JSON and seals it in a Branca token with
/// `auth_secret`, stamped with the current Unix time.
pub fn generate_auth_token(
	authorization: &Authorization,
	auth_secret: &Secret,
) -> Result<Token, Error> {
	let serialized_authorization =
		serde_json::to_string(&authorization).or(Err(Error::AuthorizationTokenEncoding))?;
	branca::encode(
		serialized_authorization.as_bytes(),
		auth_secret.as_ref(),
		// Branca timestamps are u32 seconds; a clock before the epoch
		// falls back to 0 rather than failing.
		SystemTime::now()
			.duration_since(UNIX_EPOCH)
			.unwrap_or_default()
			.as_secs() as u32,
	)
	.or(Err(Error::BrancaTokenEncoding))
	.map(Token)
}
/// Decrypts and validates an auth token, returning its `Authorization`
/// payload. Fails if the token is malformed, sealed with a different
/// secret, or carries a scope other than the one requested.
pub fn decode_auth_token(
	auth_token: &Token,
	scope: Scope,
	auth_secret: &Secret,
) -> Result<Authorization, Error> {
	let Token(data) = auth_token;
	let ttl = match scope {
		Scope::PolarisAuth => 0, // permanent
	};
	let authorization =
		branca::decode(data, auth_secret.as_ref(), ttl).map_err(|_| Error::InvalidAuthToken)?;
	let authorization: Authorization =
		serde_json::from_slice(&authorization[..]).map_err(|_| Error::InvalidAuthToken)?;
	if authorization.scope != scope {
		return Err(Error::IncorrectAuthorizationScope);
	}
	Ok(authorization)
}

338
src/app/config.rs Normal file
View file

@ -0,0 +1,338 @@
use std::{
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use log::{error, info};
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use notify_debouncer_full::{Debouncer, FileIdMap};
use regex::Regex;
use tokio::sync::{futures::Notified, Notify, RwLock};
use crate::app::Error;
mod mounts;
pub mod storage;
mod user;
pub use mounts::*;
pub use user::*;
use super::auth;
/// Validated, in-memory representation of the server configuration
/// (the raw on-disk form lives in `storage::Config`).
#[derive(Debug, Clone, Default)]
pub struct Config {
	// Regex matched against file names to locate album art.
	pub album_art_pattern: Option<Regex>,
	// URL queried to refresh a dynamic DNS entry, if configured.
	pub ddns_update_url: Option<http::Uri>,
	pub mount_dirs: Vec<MountDir>,
	pub users: Vec<User>,
}
impl TryFrom<storage::Config> for Config {
type Error = Error;
fn try_from(c: storage::Config) -> Result<Self, Self::Error> {
let mut config = Config::default();
config.set_mounts(c.mount_dirs)?;
config.set_users(c.users)?;
config.album_art_pattern = match c.album_art_pattern.as_deref().map(Regex::new) {
Some(Ok(u)) => Some(u),
Some(Err(_)) => return Err(Error::IndexAlbumArtPatternInvalid),
None => None,
};
config.ddns_update_url = match c.ddns_update_url.map(http::Uri::try_from) {
Some(Ok(u)) => Some(u),
Some(Err(_)) => return Err(Error::DDNSUpdateURLInvalid),
None => None,
};
Ok(config)
}
}
impl From<Config> for storage::Config {
fn from(c: Config) -> Self {
Self {
album_art_pattern: c.album_art_pattern.map(|p| p.as_str().to_owned()),
mount_dirs: c.mount_dirs.into_iter().map(|d| d.into()).collect(),
ddns_update_url: c.ddns_update_url.map(|u| u.to_string()),
users: c.users.into_iter().map(|u| u.into()).collect(),
}
}
}
/// Cloneable handle to the shared configuration; all clones observe the
/// same state and the same change notifications.
#[derive(Clone)]
pub struct Manager {
	config_file_path: PathBuf,
	config: Arc<RwLock<Config>>,
	auth_secret: auth::Secret,
	// Held only to keep the filesystem watcher alive for the manager's lifetime.
	#[allow(dead_code)]
	file_watcher: Arc<Debouncer<RecommendedWatcher, FileIdMap>>,
	// Signaled whenever the in-memory configuration changes.
	change_notify: Arc<Notify>,
}
impl Manager {
pub async fn new(config_file_path: &Path, auth_secret: auth::Secret) -> Result<Self, Error> {
if let Some(parent) = config_file_path.parent() {
tokio::fs::create_dir_all(parent)
.await
.map_err(|e| Error::Io(parent.to_owned(), e))?;
}
match tokio::fs::File::create_new(config_file_path).await {
Ok(_) => (),
Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => (),
Err(e) => {
error!("Failed to create config file at {config_file_path:#?}: {e}");
return Err(Error::Io(config_file_path.to_owned(), e));
}
};
let notify = Arc::new(Notify::new());
let mut debouncer = notify_debouncer_full::new_debouncer(Duration::from_secs(1), None, {
let notify = notify.clone();
move |_| {
notify.notify_waiters();
}
})?;
debouncer
.watcher()
.watch(&config_file_path, RecursiveMode::NonRecursive)?;
let manager = Self {
config_file_path: config_file_path.to_owned(),
config: Arc::new(RwLock::new(Config::default())),
auth_secret,
file_watcher: Arc::new(debouncer),
change_notify: Arc::default(),
};
tokio::task::spawn({
let manager = manager.clone();
async move {
loop {
notify.notified().await;
if let Err(e) = manager.reload_config().await {
error!("Configuration error: {e}");
} else {
info!("Successfully applied configuration change");
}
}
}
});
manager.reload_config().await?;
Ok(manager)
}
pub fn on_config_change(&self) -> Notified {
self.change_notify.notified()
}
async fn reload_config(&self) -> Result<(), Error> {
let config = Self::read_config(&self.config_file_path).await?;
self.apply_config(config).await
}
async fn read_config(config_file_path: &Path) -> Result<storage::Config, Error> {
let config_content = tokio::fs::read_to_string(config_file_path)
.await
.map_err(|e| Error::Io(config_file_path.to_owned(), e))?;
toml::de::from_str::<storage::Config>(&config_content).map_err(Error::ConfigDeserialization)
}
pub async fn save_config(&self) -> Result<(), Error> {
let serialized = toml::ser::to_string_pretty::<storage::Config>(
&self.config.read().await.clone().into(),
)
.map_err(Error::ConfigSerialization)?;
tokio::fs::write(&self.config_file_path, serialized.as_bytes())
.await
.map_err(|e| Error::Io(self.config_file_path.clone(), e))?;
Ok(())
}
pub async fn apply_config(&self, new_config: storage::Config) -> Result<(), Error> {
let mut config = self.config.write().await;
*config = new_config.try_into()?;
self.change_notify.notify_waiters();
Ok(())
}
async fn mutate<F: FnOnce(&mut Config)>(&self, op: F) -> Result<(), Error> {
self.mutate_fallible(|c| {
op(c);
Ok(())
})
.await
}
async fn mutate_fallible<F: FnOnce(&mut Config) -> Result<(), Error>>(
&self,
op: F,
) -> Result<(), Error> {
{
let mut config = self.config.write().await;
op(&mut config)?;
}
self.change_notify.notify_waiters();
self.save_config().await?;
Ok(())
}
pub async fn get_index_album_art_pattern(&self) -> Regex {
let config = self.config.read().await;
let pattern = config.album_art_pattern.clone();
pattern.unwrap_or_else(|| Regex::new("Folder.(jpeg|jpg|png)").unwrap())
}
pub async fn set_index_album_art_pattern(&self, regex: Regex) -> Result<(), Error> {
self.mutate(|c| {
c.album_art_pattern = Some(regex);
})
.await
}
pub async fn get_ddns_update_url(&self) -> Option<http::Uri> {
self.config.read().await.ddns_update_url.clone()
}
pub async fn set_ddns_update_url(&self, url: Option<http::Uri>) -> Result<(), Error> {
self.mutate(|c| {
c.ddns_update_url = url;
})
.await
}
pub async fn get_users(&self) -> Vec<User> {
self.config.read().await.users.iter().cloned().collect()
}
pub async fn get_user(&self, username: &str) -> Result<User, Error> {
let config = self.config.read().await;
config
.get_user(username)
.cloned()
.ok_or(Error::UserNotFound)
}
pub async fn create_user(
&self,
username: &str,
password: &str,
admin: bool,
) -> Result<(), Error> {
self.mutate_fallible(|c| c.create_user(username, password, admin))
.await
}
/// Verifies credentials against the current configuration and issues a token.
pub async fn login(&self, username: &str, password: &str) -> Result<auth::Token, Error> {
    self.config
        .read()
        .await
        .login(username, password, &self.auth_secret)
}
pub async fn set_is_admin(&self, username: &str, is_admin: bool) -> Result<(), Error> {
self.mutate_fallible(|c| c.set_is_admin(username, is_admin))
.await
}
pub async fn set_password(&self, username: &str, password: &str) -> Result<(), Error> {
self.mutate_fallible(|c| c.set_password(username, password))
.await
}
/// Validates an auth token for the requested scope against the current
/// configuration and secret.
pub async fn authenticate(
    &self,
    auth_token: &auth::Token,
    scope: auth::Scope,
) -> Result<auth::Authorization, Error> {
    self.config
        .read()
        .await
        .authenticate(auth_token, scope, &self.auth_secret)
}
pub async fn delete_user(&self, username: &str) -> Result<(), Error> {
self.mutate(|c| c.delete_user(username)).await
}
/// Returns a snapshot of all configured mount points.
pub async fn get_mounts(&self) -> Vec<MountDir> {
    // `Vec::clone` instead of `iter().cloned().collect()`
    // (clippy::iter_cloned_collect) — same result, clearer intent.
    self.config.read().await.mount_dirs.clone()
}
/// Maps a virtual collection path to a real filesystem path using the
/// configured mounts.
pub async fn resolve_virtual_path<P: AsRef<Path>>(
    &self,
    virtual_path: P,
) -> Result<PathBuf, Error> {
    self.config
        .read()
        .await
        .resolve_virtual_path(virtual_path)
}
pub async fn set_mounts(&self, mount_dirs: Vec<storage::MountDir>) -> Result<(), Error> {
self.mutate_fallible(|c| c.set_mounts(mount_dirs)).await
}
}
#[cfg(test)]
mod test {
    use crate::app::test;
    use crate::test_name;

    use super::*;

    // An absent/blank config file must round-trip to the default configuration.
    #[tokio::test]
    async fn blank_config_round_trip() {
        let config_path = PathBuf::from_iter(["test-data", "blank.toml"]);
        let manager = Manager::new(&config_path, auth::Secret([0; 32]))
            .await
            .unwrap();
        let config: storage::Config = manager.config.read().await.clone().into();
        assert_eq!(config, storage::Config::default());
    }

    // Reads the checked-in fixture config and spot-checks each section
    // (album art pattern, mounts, and the first user's fields).
    #[tokio::test]
    async fn can_read_config() {
        let config_path = PathBuf::from_iter(["test-data", "config.toml"]);
        let manager = Manager::new(&config_path, auth::Secret([0; 32]))
            .await
            .unwrap();
        let config: storage::Config = manager.config.read().await.clone().into();
        assert_eq!(
            config.album_art_pattern,
            Some(r#"^Folder\.(png|jpg|jpeg)$"#.to_owned())
        );
        assert_eq!(
            config.mount_dirs,
            vec![storage::MountDir {
                source: PathBuf::from("test-data/small-collection"),
                name: "root".to_owned(),
            }]
        );
        assert_eq!(config.users[0].name, "test_user");
        assert_eq!(config.users[0].admin, Some(true));
        assert_eq!(
            config.users[0].initial_password,
            Some("very_secret_password".to_owned())
        );
        // Initial passwords get hashed as part of reading the config.
        assert!(config.users[0].hashed_password.is_some());
    }

    // `create_user` persists to disk: a second Manager re-reading the same
    // file must see the new user.
    #[tokio::test]
    async fn can_write_config() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;

        ctx.config_manager
            .create_user("Walter", "example_password", false)
            .await
            .unwrap();

        let manager = Manager::new(&ctx.config_manager.config_file_path, auth::Secret([0; 32]))
            .await
            .unwrap();

        assert!(manager.get_user("Walter").await.is_ok());
    }
}

149
src/app/config/mounts.rs Normal file
View file

@ -0,0 +1,149 @@
use std::{
ops::Deref,
path::{Path, PathBuf},
};
use regex::Regex;
use crate::app::Error;
use super::storage;
use super::Config;
/// A validated mount point: a real filesystem directory (`source`) exposed
/// under a virtual top-level name (`name`).
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct MountDir {
    // Real directory on disk; separators normalized by `sanitize_path`.
    pub source: PathBuf,
    // Virtual name used as the first component of virtual paths.
    pub name: String,
}
impl TryFrom<storage::MountDir> for MountDir {
    type Error = Error;

    /// Converts a stored mount entry, normalizing path separators in `source`.
    /// Currently infallible in practice; the Result leaves room for the
    /// validation noted below.
    fn try_from(mount_dir: storage::MountDir) -> Result<Self, Self::Error> {
        // TODO validation
        Ok(Self {
            source: sanitize_path(&mount_dir.source),
            name: mount_dir.name,
        })
    }
}
impl From<MountDir> for storage::MountDir {
fn from(m: MountDir) -> Self {
Self {
source: m.source,
name: m.name,
}
}
}
impl Config {
pub fn set_mounts(&mut self, mount_dirs: Vec<storage::MountDir>) -> Result<(), Error> {
let mut new_mount_dirs = Vec::new();
for mount_dir in mount_dirs {
let mount_dir = <storage::MountDir as TryInto<MountDir>>::try_into(mount_dir)?;
new_mount_dirs.push(mount_dir);
}
new_mount_dirs.dedup_by(|a, b| a.name == b.name);
self.mount_dirs = new_mount_dirs;
Ok(())
}
pub fn resolve_virtual_path<P: AsRef<Path>>(&self, virtual_path: P) -> Result<PathBuf, Error> {
for mount in &self.mount_dirs {
if let Ok(p) = virtual_path.as_ref().strip_prefix(&mount.name) {
return if p.components().count() == 0 {
Ok(mount.source.clone())
} else {
Ok(mount.source.join(p))
};
}
}
Err(Error::CouldNotMapToRealPath(virtual_path.as_ref().into()))
}
}
/// Normalizes a path to this platform's separator, accepting both `/` and
/// `\` in the input (e.g. a Windows-style path in a config written on Linux).
///
/// The previous implementation compiled a `Regex` on every call; a per-char
/// map over the two separator characters is equivalent and allocation-light.
fn sanitize_path(source: &PathBuf) -> PathBuf {
    let path_string = source.to_string_lossy();
    let sanitized: String = path_string
        .chars()
        .map(|c| match c {
            '/' | '\\' => std::path::MAIN_SEPARATOR,
            other => other,
        })
        .collect();
    PathBuf::from(sanitized)
}
#[cfg(test)]
mod test {
    use super::*;

    // Virtual paths resolve through the mount table: the bare mount name maps
    // to the source directory, deeper paths are joined onto it.
    #[test]
    fn can_resolve_virtual_paths() {
        let raw_config = storage::Config {
            mount_dirs: vec![storage::MountDir {
                name: "root".to_owned(),
                source: PathBuf::from("test_dir"),
            }],
            ..Default::default()
        };
        let config: Config = raw_config.try_into().unwrap();

        let test_cases = vec![
            (vec!["root"], vec!["test_dir"]),
            (
                vec!["root", "somewhere", "something.png"],
                vec!["test_dir", "somewhere", "something.png"],
            ),
        ];

        for (r#virtual, real) in test_cases {
            let real_path: PathBuf = real.iter().collect();
            let virtual_path: PathBuf = r#virtual.iter().collect();
            let converted_path = config.resolve_virtual_path(&virtual_path).unwrap();
            assert_eq!(converted_path, real_path);
        }
    }

    // Mixed and repeated separators all normalize to the platform separator.
    // Expected values differ per OS, hence the cfg branches.
    #[test]
    fn sanitizes_paths() {
        let mut correct_path = PathBuf::new();
        if cfg!(target_os = "windows") {
            correct_path.push("C:\\");
        } else {
            correct_path.push("/usr");
        }
        correct_path.push("some");
        correct_path.push("path");

        let tests = if cfg!(target_os = "windows") {
            vec![
                r#"C:/some/path"#,
                r#"C:\some\path"#,
                r#"C:\some\path\"#,
                r#"C:\some\path\\\\"#,
                r#"C:\some/path//"#,
            ]
        } else {
            vec![
                r#"/usr/some/path"#,
                r#"/usr\some\path"#,
                r#"/usr\some\path\"#,
                r#"/usr\some\path\\\\"#,
                r#"/usr\some/path//"#,
            ]
        };

        for test in tests {
            let raw_config = storage::Config {
                mount_dirs: vec![storage::MountDir {
                    name: "root".to_owned(),
                    source: PathBuf::from(test),
                }],
                ..Default::default()
            };
            let config: Config = raw_config.try_into().unwrap();
            let converted_path = config.resolve_virtual_path(&PathBuf::from("root")).unwrap();
            assert_eq!(converted_path, correct_path);
        }
    }
}

32
src/app/config/storage.rs Normal file
View file

@ -0,0 +1,32 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
/// On-disk (TOML) representation of a user account.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct User {
    pub name: String,
    // `Some(true)` grants admin rights; absent/false otherwise.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub admin: Option<bool>,
    // Plain-text password used to seed the hash on first load.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub initial_password: Option<String>,
    // Derived hash; takes precedence over `initial_password` when present.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hashed_password: Option<String>,
}
/// On-disk (TOML) representation of a mount point: real directory `source`
/// exposed under virtual `name`.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct MountDir {
    pub source: PathBuf,
    pub name: String,
}
/// On-disk (TOML) representation of the whole configuration file.
/// Empty/absent sections are omitted when serializing.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
pub struct Config {
    // Regex (as a string) matching album-art filenames during indexing.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub album_art_pattern: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub mount_dirs: Vec<MountDir>,
    // Stored as a string here; parsed into an http::Uri at runtime.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ddns_update_url: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub users: Vec<User>,
}

308
src/app/config/user.rs Normal file
View file

@ -0,0 +1,308 @@
use crate::app::{auth, Error};
use super::storage;
use super::Config;
/// Runtime representation of a user. Unlike `storage::User`, the password
/// hash is mandatory here (derived during conversion).
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct User {
    pub name: String,
    pub admin: Option<bool>,
    // Kept so round-tripping back to storage preserves the original field.
    pub initial_password: Option<String>,
    pub hashed_password: String,
}
impl User {
    /// Only an explicit `Some(true)` grants admin rights.
    pub fn is_admin(&self) -> bool {
        matches!(self.admin, Some(true))
    }
}
impl TryFrom<storage::User> for User {
    type Error = Error;

    /// Converts a stored user record, deriving the runtime password hash:
    /// an existing stored hash wins; otherwise the plain-text initial
    /// password is hashed; a user with neither is rejected.
    fn try_from(user: storage::User) -> Result<Self, Self::Error> {
        let hashed_password = match (&user.initial_password, &user.hashed_password) {
            // Stored hash takes precedence even when an initial password exists.
            (_, Some(p)) => p.clone(),
            (Some(p), None) => auth::hash_password(p)?,
            (None, None) => return Err(Error::EmptyPassword),
        };
        Ok(Self {
            name: user.name,
            admin: user.admin,
            initial_password: user.initial_password,
            hashed_password,
        })
    }
}
impl From<User> for storage::User {
    /// Converts back to the serializable form; the mandatory runtime hash
    /// becomes the optional stored hash.
    fn from(user: User) -> Self {
        Self {
            name: user.name,
            admin: user.admin,
            initial_password: user.initial_password,
            hashed_password: Some(user.hashed_password),
        }
    }
}
impl Config {
pub fn set_users(&mut self, users: Vec<storage::User>) -> Result<(), Error> {
let mut new_users = Vec::new();
for user in users {
let user = <storage::User as TryInto<User>>::try_into(user)?;
new_users.push(user);
}
new_users.dedup_by(|a, b| a.name == b.name);
self.users = new_users;
Ok(())
}
pub fn create_user(
&mut self,
username: &str,
password: &str,
admin: bool,
) -> Result<(), Error> {
if username.is_empty() {
return Err(Error::EmptyUsername);
}
if self.exists(username) {
return Err(Error::DuplicateUsername);
}
let password_hash = auth::hash_password(&password)?;
self.users.push(User {
name: username.to_owned(),
admin: Some(admin),
initial_password: None,
hashed_password: password_hash,
});
Ok(())
}
pub fn exists(&self, username: &str) -> bool {
self.users.iter().any(|u| u.name == username)
}
pub fn get_user(&self, username: &str) -> Option<&User> {
self.users.iter().find(|u| u.name == username)
}
pub fn get_user_mut(&mut self, username: &str) -> Option<&mut User> {
self.users.iter_mut().find(|u| u.name == username)
}
pub fn authenticate(
&self,
auth_token: &auth::Token,
scope: auth::Scope,
auth_secret: &auth::Secret,
) -> Result<auth::Authorization, Error> {
let authorization = auth::decode_auth_token(auth_token, scope, auth_secret)?;
if self.exists(&authorization.username) {
Ok(authorization)
} else {
Err(Error::IncorrectUsername)
}
}
pub fn login(
&self,
username: &str,
password: &str,
auth_secret: &auth::Secret,
) -> Result<auth::Token, Error> {
let user = self.get_user(username).ok_or(Error::IncorrectUsername)?;
if auth::verify_password(&user.hashed_password, password) {
let authorization = auth::Authorization {
username: username.to_owned(),
scope: auth::Scope::PolarisAuth,
};
auth::generate_auth_token(&authorization, auth_secret)
} else {
Err(Error::IncorrectPassword)
}
}
pub fn set_is_admin(&mut self, username: &str, is_admin: bool) -> Result<(), Error> {
let user = self.get_user_mut(username).ok_or(Error::UserNotFound)?;
user.admin = Some(is_admin);
Ok(())
}
pub fn set_password(&mut self, username: &str, password: &str) -> Result<(), Error> {
let user = self.get_user_mut(username).ok_or(Error::UserNotFound)?;
user.hashed_password = auth::hash_password(password)?;
Ok(())
}
pub fn delete_user(&mut self, username: &str) {
self.users.retain(|u| u.name != username);
}
}
#[cfg(test)]
mod test {
    use crate::app::test;
    use crate::test_name;

    use super::*;

    const TEST_USERNAME: &str = "Walter";
    const TEST_PASSWORD: &str = "super_secret!";

    // Converting a storage user with only an initial password must produce a hash.
    #[test]
    fn adds_password_hashes() {
        let user_in = storage::User {
            name: TEST_USERNAME.to_owned(),
            initial_password: Some(TEST_PASSWORD.to_owned()),
            ..Default::default()
        };
        let user: User = user_in.try_into().unwrap();
        let user_out: storage::User = user.into();

        assert_eq!(user_out.name, TEST_USERNAME);
        assert_eq!(user_out.initial_password, Some(TEST_PASSWORD.to_owned()));
        assert!(user_out.hashed_password.is_some());
    }

    // An already-hashed user must round-trip unchanged (no re-hashing).
    #[test]
    fn preserves_password_hashes() {
        let user_in = storage::User {
            name: TEST_USERNAME.to_owned(),
            hashed_password: Some("hash".to_owned()),
            ..Default::default()
        };

        let user: User = user_in.clone().try_into().unwrap();
        let user_out: storage::User = user.into();

        assert_eq!(user_out, user_in);
    }

    #[tokio::test]
    async fn create_delete_user_golden_path() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;
        ctx.config_manager
            .create_user(TEST_USERNAME, TEST_PASSWORD, false)
            .await
            .unwrap();
        assert!(ctx.config_manager.get_user(TEST_USERNAME).await.is_ok());
        ctx.config_manager.delete_user(TEST_USERNAME).await.unwrap();
        assert!(ctx.config_manager.get_user(TEST_USERNAME).await.is_err());
    }

    #[tokio::test]
    async fn cannot_create_user_with_blank_username() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;
        let result = ctx.config_manager.create_user("", TEST_PASSWORD, false);
        assert!(matches!(result.await.unwrap_err(), Error::EmptyUsername));
    }

    // Empty-password rejection surfaces via the hashing step as EmptyPassword.
    #[tokio::test]
    async fn cannot_create_user_with_blank_password() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;
        let result = ctx.config_manager.create_user(TEST_USERNAME, "", false);
        assert!(matches!(result.await.unwrap_err(), Error::EmptyPassword));
    }

    #[tokio::test]
    async fn cannot_create_duplicate_user() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;

        let result = ctx
            .config_manager
            .create_user(TEST_USERNAME, TEST_PASSWORD, false);
        assert!(result.await.is_ok());

        let result = ctx
            .config_manager
            .create_user(TEST_USERNAME, TEST_PASSWORD, false);
        assert!(matches!(
            result.await.unwrap_err(),
            Error::DuplicateUsername
        ));
    }

    #[tokio::test]
    async fn login_rejects_bad_password() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;

        ctx.config_manager
            .create_user(TEST_USERNAME, TEST_PASSWORD, false)
            .await
            .unwrap();

        let result = ctx.config_manager.login(TEST_USERNAME, "not the password");
        assert!(matches!(
            result.await.unwrap_err(),
            Error::IncorrectPassword
        ));
    }

    #[tokio::test]
    async fn login_golden_path() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;
        ctx.config_manager
            .create_user(TEST_USERNAME, TEST_PASSWORD, false)
            .await
            .unwrap();
        let result = ctx.config_manager.login(TEST_USERNAME, TEST_PASSWORD);
        assert!(result.await.is_ok());
    }

    // A token that never came from `login` must not authenticate.
    #[tokio::test]
    async fn authenticate_rejects_bad_token() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;

        ctx.config_manager
            .create_user(TEST_USERNAME, TEST_PASSWORD, false)
            .await
            .unwrap();

        let fake_token = auth::Token("fake token".to_owned());
        assert!(ctx
            .config_manager
            .authenticate(&fake_token, auth::Scope::PolarisAuth)
            .await
            .is_err())
    }

    // Full login → authenticate round trip yields the expected authorization.
    #[tokio::test]
    async fn authenticate_golden_path() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;

        ctx.config_manager
            .create_user(TEST_USERNAME, TEST_PASSWORD, false)
            .await
            .unwrap();

        let token = ctx
            .config_manager
            .login(TEST_USERNAME, TEST_PASSWORD)
            .await
            .unwrap();
        let authorization = ctx
            .config_manager
            .authenticate(&token, auth::Scope::PolarisAuth)
            .await
            .unwrap();

        assert_eq!(
            authorization,
            auth::Authorization {
                username: TEST_USERNAME.to_owned(),
                scope: auth::Scope::PolarisAuth,
            }
        )
    }
}

45
src/app/ddns.rs Normal file
View file

@ -0,0 +1,45 @@
use log::{debug, error};
use std::time::Duration;
use crate::app::{config, Error};
/// Dynamic DNS updater; reads the update URL from the shared configuration.
/// Cheap to clone so it can be moved into background tasks.
#[derive(Clone)]
pub struct Manager {
    config_manager: config::Manager,
}
impl Manager {
    pub fn new(config_manager: config::Manager) -> Self {
        Self { config_manager }
    }

    /// Performs a single DDNS update by issuing a GET request to the
    /// configured update URL. Succeeds silently when no URL is configured.
    pub async fn update_ddns(&self) -> Result<(), Error> {
        let url = self.config_manager.get_ddns_update_url().await;
        let Some(url) = url else {
            // The setting being checked is a URL, not credentials; the old
            // message ("credentials are missing") was misleading.
            debug!("Skipping DDNS update because no update URL is configured");
            return Ok(());
        };

        let response = ureq::get(&url.to_string()).call();

        match response {
            Ok(_) => Ok(()),
            Err(ureq::Error::Status(code, _)) => Err(Error::UpdateQueryFailed(code)),
            Err(ureq::Error::Transport(_)) => Err(Error::UpdateQueryTransport),
        }
    }

    /// Spawns a detached background task that attempts a DDNS update every
    /// 30 minutes, logging (but not propagating) failures.
    pub fn begin_periodic_updates(&self) {
        tokio::spawn({
            let ddns = self.clone();
            async move {
                loop {
                    if let Err(e) = ddns.update_ddns().await {
                        error!("Dynamic DNS update error: {:?}", e);
                    }
                    tokio::time::sleep(Duration::from_secs(60 * 30)).await;
                }
            }
        });
    }
}

444
src/app/formats.rs Normal file
View file

@ -0,0 +1,444 @@
use id3::TagLike;
use lewton::inside_ogg::OggStreamReader;
use log::error;
use std::fs;
use std::io::{Seek, SeekFrom};
use std::path::Path;
use crate::app::Error;
use crate::utils;
use crate::utils::AudioFormat;
/// Tag data extracted from one audio file. Multi-valued fields (artists,
/// genres, ...) hold every value found; absent fields are `None`/empty.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct SongMetadata {
    pub disc_number: Option<u32>,
    pub track_number: Option<u32>,
    pub title: Option<String>,
    // Duration in seconds (readers derive it via as_secs() or
    // total_samples / sample_rate).
    pub duration: Option<u32>,
    pub artists: Vec<String>,
    pub album_artists: Vec<String>,
    pub album: Option<String>,
    pub year: Option<i32>,
    // True when at least one embedded picture was found.
    pub has_artwork: bool,
    pub lyricists: Vec<String>,
    pub composers: Vec<String>,
    pub genres: Vec<String>,
    pub labels: Vec<String>,
}
/// Detects the audio format of `path` and dispatches to the matching tag
/// reader. Returns `None` for unknown formats or when parsing fails (the
/// failure is logged, not propagated).
pub fn read_metadata<P: AsRef<Path>>(path: P) -> Option<SongMetadata> {
    let data = match utils::get_audio_format(&path) {
        // AIFF and WAVE containers carry ID3 tags.
        Some(AudioFormat::AIFF) => read_id3(&path),
        Some(AudioFormat::FLAC) => read_flac(&path),
        Some(AudioFormat::MP3) => read_mp3(&path),
        Some(AudioFormat::OGG) => read_vorbis(&path),
        Some(AudioFormat::OPUS) => read_opus(&path),
        Some(AudioFormat::WAVE) => read_id3(&path),
        Some(AudioFormat::APE) | Some(AudioFormat::MPC) => read_ape(&path),
        Some(AudioFormat::MP4) | Some(AudioFormat::M4B) => read_mp4(&path),
        None => return None,
    };
    match data {
        Ok(d) => Some(d),
        Err(e) => {
            error!(
                "Error while reading file metadata for '{:?}': {}",
                path.as_ref(),
                e
            );
            None
        }
    }
}
/// Convenience accessor for multi-valued text frames on an ID3 tag.
trait ID3Ext {
    // Returns every text value of the named frame, or an empty Vec when the
    // frame is absent or not textual.
    fn get_text_values(&self, frame_name: &str) -> Vec<String>;
}
impl ID3Ext for id3::Tag {
    /// Collects every text value of `frame_name`; empty when the frame is
    /// missing or carries no text values.
    fn get_text_values(&self, frame_name: &str) -> Vec<String> {
        let values = self.get(frame_name).and_then(|f| f.content().text_values());
        match values {
            Some(iter) => iter.map(str::to_string).collect(),
            None => Vec::new(),
        }
    }
}
fn read_id3<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
let file = fs::File::open(path.as_ref()).map_err(|e| Error::Io(path.as_ref().to_owned(), e))?;
read_id3_from_file(&file, path)
}
/// Reads an ID3 tag from an already-open file handle (`path` is only used
/// for error reporting).
fn read_id3_from_file<P: AsRef<Path>>(file: &fs::File, path: P) -> Result<SongMetadata, Error> {
    let tag = id3::Tag::read_from2(file)
        .or_else(|error| {
            // A partially-readable tag is better than nothing: salvage it.
            if let Some(tag) = error.partial_tag {
                Ok(tag)
            } else {
                Err(error)
            }
        })
        .map_err(|e| Error::Id3(path.as_ref().to_owned(), e))?;

    let artists = tag.get_text_values("TPE1");
    let album_artists = tag.get_text_values("TPE2");
    let album = tag.album().map(|s| s.to_string());
    let title = tag.title().map(|s| s.to_string());
    let duration = tag.duration();
    let disc_number = tag.disc();
    let track_number = tag.track();
    // Year fallback chain: TYER/TDRC-style year, then release date, then
    // original release date, then recording date.
    let year = tag
        .year()
        .or_else(|| tag.date_released().map(|d| d.year))
        .or_else(|| tag.original_date_released().map(|d| d.year))
        .or_else(|| tag.date_recorded().map(|d| d.year));
    let has_artwork = tag.pictures().count() > 0;
    let lyricists = tag.get_text_values("TEXT");
    let composers = tag.get_text_values("TCOM");
    let genres = tag.get_text_values("TCON");
    let labels = tag.get_text_values("TPUB");

    Ok(SongMetadata {
        disc_number,
        track_number,
        title,
        duration,
        artists,
        album_artists,
        album,
        year,
        has_artwork,
        lyricists,
        composers,
        genres,
        labels,
    })
}
fn read_mp3<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
let mut file = fs::File::open(&path).unwrap();
let mut metadata = read_id3_from_file(&file, &path)?;
metadata.duration = metadata.duration.or_else(|| {
file.seek(SeekFrom::Start(0)).unwrap();
mp3_duration::from_file(&file)
.map(|d| d.as_secs() as u32)
.ok()
});
Ok(metadata)
}
/// Helpers for decoding APE tag items into plain values.
mod ape_ext {
    use regex::Regex;
    use std::sync::LazyLock;

    /// Reads an item as a single string.
    pub fn read_string(item: &ape::Item) -> Option<String> {
        item.try_into().ok().map(str::to_string)
    }

    /// Reads a possibly multi-valued item as a list of strings; empty when
    /// the item is absent or not textual.
    pub fn read_strings(item: Option<&ape::Item>) -> Vec<String> {
        let Some(item) = item else {
            return vec![];
        };
        let strings: Vec<&str> = item.try_into().unwrap_or_default();
        strings.into_iter().map(str::to_string).collect()
    }

    pub fn read_i32(item: &ape::Item) -> Option<i32> {
        // `.and_then` instead of `.map(..).flatten()` (clippy::map_flatten).
        item.try_into()
            .ok()
            .and_then(|s: &str| s.parse::<i32>().ok())
    }

    static X_OF_Y_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r#"^\d+"#).unwrap());

    /// Parses the leading integer of "X/Y"-style values (e.g. "3/12" → 3).
    pub fn read_x_of_y(item: &ape::Item) -> Option<u32> {
        // `.and_then` instead of `.map(..).flatten()` (clippy::map_flatten).
        item.try_into().ok().and_then(|s: &str| {
            let m = X_OF_Y_REGEX.find(s)?;
            s[m.start()..m.end()].parse().ok()
        })
    }
}
/// Reads metadata from an APE tag (APE and Musepack files).
/// APE tags carry no duration or artwork info here, hence the fixed values.
fn read_ape<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
    let tag = ape::read_from_path(path)?;
    let artists = ape_ext::read_strings(tag.item("Artist"));
    let album = tag.item("Album").and_then(ape_ext::read_string);
    let album_artists = ape_ext::read_strings(tag.item("Album artist"));
    let title = tag.item("Title").and_then(ape_ext::read_string);
    let year = tag.item("Year").and_then(ape_ext::read_i32);
    // Disc/Track may be stored as "X/Y"; keep only the leading number.
    let disc_number = tag.item("Disc").and_then(ape_ext::read_x_of_y);
    let track_number = tag.item("Track").and_then(ape_ext::read_x_of_y);
    let lyricists = ape_ext::read_strings(tag.item("LYRICIST"));
    let composers = ape_ext::read_strings(tag.item("COMPOSER"));
    let genres = ape_ext::read_strings(tag.item("GENRE"));
    let labels = ape_ext::read_strings(tag.item("PUBLISHER"));
    Ok(SongMetadata {
        artists,
        album_artists,
        album,
        title,
        duration: None,
        disc_number,
        track_number,
        year,
        has_artwork: false,
        lyricists,
        composers,
        genres,
        labels,
    })
}
/// Reads metadata from an OGG Vorbis file's comment header.
/// Repeated ARTIST/GENRE/etc. comments accumulate into the Vec fields.
fn read_vorbis<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
    let file = fs::File::open(&path).map_err(|e| Error::Io(path.as_ref().to_owned(), e))?;
    let source = OggStreamReader::new(file)?;

    let mut metadata = SongMetadata::default();
    for (key, value) in source.comment_hdr.comment_list {
        // Vorbis comment keys are case-insensitive by convention.
        utils::match_ignore_case! {
            match key {
                "TITLE" => metadata.title = Some(value),
                "ALBUM" => metadata.album = Some(value),
                "ARTIST" => metadata.artists.push(value),
                "ALBUMARTIST" => metadata.album_artists.push(value),
                "TRACKNUMBER" => metadata.track_number = value.parse::<u32>().ok(),
                "DISCNUMBER" => metadata.disc_number = value.parse::<u32>().ok(),
                "DATE" => metadata.year = value.parse::<i32>().ok(),
                "LYRICIST" => metadata.lyricists.push(value),
                "COMPOSER" => metadata.composers.push(value),
                "GENRE" => metadata.genres.push(value),
                "PUBLISHER" => metadata.labels.push(value),
                _ => (),
            }
        }
    }

    Ok(metadata)
}
/// Reads metadata from an Opus file's comment headers.
/// Mirrors `read_vorbis`: both formats use Vorbis-style comments.
fn read_opus<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
    let headers = opus_headers::parse_from_path(path)?;

    let mut metadata = SongMetadata::default();
    for (key, value) in headers.comments.user_comments {
        // Comment keys are matched case-insensitively.
        utils::match_ignore_case! {
            match key {
                "TITLE" => metadata.title = Some(value),
                "ALBUM" => metadata.album = Some(value),
                "ARTIST" => metadata.artists.push(value),
                "ALBUMARTIST" => metadata.album_artists.push(value),
                "TRACKNUMBER" => metadata.track_number = value.parse::<u32>().ok(),
                "DISCNUMBER" => metadata.disc_number = value.parse::<u32>().ok(),
                "DATE" => metadata.year = value.parse::<i32>().ok(),
                "LYRICIST" => metadata.lyricists.push(value),
                "COMPOSER" => metadata.composers.push(value),
                "GENRE" => metadata.genres.push(value),
                "PUBLISHER" => metadata.labels.push(value),
                _ => (),
            }
        }
    }

    Ok(metadata)
}
/// Reads metadata from a FLAC file's Vorbis comments and StreamInfo block.
fn read_flac<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
    let tag = metaflac::Tag::read_from_path(&path)
        .map_err(|e| Error::Metaflac(path.as_ref().to_owned(), e))?;
    let vorbis = tag
        .vorbis_comments()
        .ok_or(Error::VorbisCommentNotFoundInFlacFile)?;
    // Use `.first()` rather than `[0]`: a present-but-empty comment list
    // would previously panic.
    let first_value = |o: Option<&Vec<String>>| o.and_then(|v| v.first().cloned());
    let disc_number = first_value(vorbis.get("DISCNUMBER")).and_then(|d| d.parse::<u32>().ok());
    let year = first_value(vorbis.get("DATE")).and_then(|d| d.parse::<i32>().ok());
    // Duration in seconds from the mandatory StreamInfo block, when present.
    let mut streaminfo = tag.get_blocks(metaflac::BlockType::StreamInfo);
    let duration = match streaminfo.next() {
        Some(metaflac::Block::StreamInfo(s)) => Some(s.total_samples as u32 / s.sample_rate),
        _ => None,
    };
    let has_artwork = tag.pictures().count() > 0;
    let multivalue = |o: Option<&Vec<String>>| o.cloned().unwrap_or_default();

    Ok(SongMetadata {
        artists: multivalue(vorbis.artist()),
        album_artists: multivalue(vorbis.album_artist()),
        album: first_value(vorbis.album()),
        title: first_value(vorbis.title()),
        duration,
        disc_number,
        track_number: vorbis.track(),
        year,
        has_artwork,
        lyricists: multivalue(vorbis.get("LYRICIST")),
        composers: multivalue(vorbis.get("COMPOSER")),
        genres: multivalue(vorbis.get("GENRE")),
        labels: multivalue(vorbis.get("PUBLISHER")),
    })
}
/// Reads metadata from an MP4/M4A/M4B container via mp4ameta.
/// `take_*` accessors move values out of the tag, avoiding clones.
fn read_mp4<P: AsRef<Path>>(path: P) -> Result<SongMetadata, Error> {
    let mut tag = mp4ameta::Tag::read_from_path(&path)
        .map_err(|e| Error::Mp4aMeta(path.as_ref().to_owned(), e))?;
    // Record label lives in a freeform iTunes atom, not a standard field.
    let label_ident = mp4ameta::FreeformIdent::new("com.apple.iTunes", "Label");

    Ok(SongMetadata {
        artists: tag.take_artists().collect(),
        album_artists: tag.take_album_artists().collect(),
        album: tag.take_album(),
        title: tag.take_title(),
        duration: tag.duration().map(|v| v.as_secs() as u32),
        disc_number: tag.disc_number().map(|d| d as u32),
        track_number: tag.track_number().map(|d| d as u32),
        year: tag.year().and_then(|v| v.parse::<i32>().ok()),
        has_artwork: tag.artwork().is_some(),
        lyricists: tag.take_lyricists().collect(),
        composers: tag.take_composers().collect(),
        genres: tag.take_genres().collect(),
        labels: tag.take_strings_of(&label_ident).collect(),
    })
}
// Every supported format must parse the shared fixture tags identically;
// only duration support differs between formats.
#[test]
fn reads_file_metadata() {
    let expected_without_duration = SongMetadata {
        disc_number: Some(3),
        track_number: Some(1),
        title: Some("TEST TITLE".into()),
        artists: vec!["TEST ARTIST".into()],
        album_artists: vec!["TEST ALBUM ARTIST".into()],
        album: Some("TEST ALBUM".into()),
        duration: None,
        year: Some(2016),
        has_artwork: false,
        lyricists: vec!["TEST LYRICIST".into()],
        composers: vec!["TEST COMPOSER".into()],
        genres: vec!["TEST GENRE".into()],
        labels: vec!["TEST LABEL".into()],
    };
    // Fixtures are near-zero-length clips, hence duration 0.
    let expected_with_duration = SongMetadata {
        duration: Some(0),
        ..expected_without_duration.clone()
    };
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.aif")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.mp3")).unwrap(),
        expected_with_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.ogg")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.flac")).unwrap(),
        expected_with_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.m4a")).unwrap(),
        expected_with_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.opus")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.ape")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/formats/sample.wav")).unwrap(),
        expected_without_duration
    );
}
// Formats that support embedded pictures must report has_artwork for the
// artwork fixtures.
#[test]
fn reads_embedded_artwork() {
    assert!(
        read_metadata(Path::new("test-data/artwork/sample.aif"))
            .unwrap()
            .has_artwork
    );
    assert!(
        read_metadata(Path::new("test-data/artwork/sample.mp3"))
            .unwrap()
            .has_artwork
    );
    assert!(
        read_metadata(Path::new("test-data/artwork/sample.flac"))
            .unwrap()
            .has_artwork
    );
    assert!(
        read_metadata(Path::new("test-data/artwork/sample.m4a"))
            .unwrap()
            .has_artwork
    );
    assert!(
        read_metadata(Path::new("test-data/artwork/sample.wav"))
            .unwrap()
            .has_artwork
    );
}
// Multi-valued tags (two artists, two genres, ...) must be preserved in
// order for every format that supports them.
#[test]
fn reads_multivalue_fields() {
    let expected_without_duration = SongMetadata {
        disc_number: Some(3),
        track_number: Some(1),
        title: Some("TEST TITLE".into()),
        artists: vec!["TEST ARTIST".into(), "OTHER ARTIST".into()],
        album_artists: vec!["TEST ALBUM ARTIST".into(), "OTHER ALBUM ARTIST".into()],
        album: Some("TEST ALBUM".into()),
        duration: None,
        year: Some(2016),
        has_artwork: false,
        lyricists: vec!["TEST LYRICIST".into(), "OTHER LYRICIST".into()],
        composers: vec!["TEST COMPOSER".into(), "OTHER COMPOSER".into()],
        genres: vec!["TEST GENRE".into(), "OTHER GENRE".into()],
        labels: vec!["TEST LABEL".into(), "OTHER LABEL".into()],
    };
    let expected_with_duration = SongMetadata {
        duration: Some(0),
        ..expected_without_duration.clone()
    };
    assert_eq!(
        read_metadata(Path::new("test-data/multivalue/multivalue.aif")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/multivalue/multivalue.mp3")).unwrap(),
        expected_with_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/multivalue/multivalue.ogg")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/multivalue/multivalue.flac")).unwrap(),
        expected_with_duration
    );
    // TODO Test m4a support (likely working). Pending https://tickets.metabrainz.org/browse/PICARD-3029
    assert_eq!(
        read_metadata(Path::new("test-data/multivalue/multivalue.opus")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/multivalue/multivalue.ape")).unwrap(),
        expected_without_duration
    );
    assert_eq!(
        read_metadata(Path::new("test-data/multivalue/multivalue.wav")).unwrap(),
        expected_without_duration
    );
}

388
src/app/index.rs Normal file
View file

@ -0,0 +1,388 @@
use std::{
path::{Path, PathBuf},
sync::{Arc, RwLock},
};
use log::{error, info};
use serde::{Deserialize, Serialize};
use tokio::task::spawn_blocking;
use crate::app::{scanner, Error};
mod browser;
mod collection;
mod dictionary;
mod query;
mod search;
mod storage;
pub use browser::File;
pub use collection::{Album, AlbumHeader, Artist, ArtistHeader, Genre, GenreHeader, Song};
use storage::{store_song, AlbumKey, ArtistKey, GenreKey, InternPath, SongKey};
/// Shared handle to the in-memory collection index and its on-disk copy.
/// Cheap to clone; clones share the same index via the Arc.
#[derive(Clone)]
pub struct Manager {
    index_file_path: PathBuf,
    index: Arc<RwLock<Index>>, // Not a tokio RwLock as we want to do CPU-bound work with Index and lock this inside spawn_blocking()
}
impl Manager {
/// Creates a Manager storing its index file in `directory` (created if
/// missing) and best-effort restores any previously persisted index.
pub async fn new(directory: &Path) -> Result<Self, Error> {
    tokio::fs::create_dir_all(directory)
        .await
        .map_err(|e| Error::Io(directory.to_owned(), e))?;

    let index_manager = Self {
        index_file_path: directory.join("collection.index"),
        index: Arc::default(),
    };

    // Restore failures are logged, not fatal: the index can be rebuilt.
    match index_manager.try_restore_index().await {
        Ok(true) => info!("Restored collection index from disk"),
        Ok(false) => info!("No existing collection index to restore"),
        Err(e) => error!("Failed to restore collection index: {}", e),
    };

    Ok(index_manager)
}
/// Returns true when the collection contains no songs.
/// Lock acquisition happens inside spawn_blocking (std RwLock, see field comment).
pub async fn is_index_empty(&self) -> bool {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            index.collection.num_songs() == 0
        }
    })
    .await
    .unwrap()
}
/// Swaps in a freshly built index, replacing the current one in memory.
/// Does not persist to disk; see `persist_index`.
pub async fn replace_index(&self, new_index: Index) {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let mut lock = index_manager.index.write().unwrap();
            *lock = new_index;
        }
    })
    .await
    .unwrap()
}
pub async fn persist_index(&self, index: &Index) -> Result<(), Error> {
let serialized = match bitcode::serialize(index) {
Ok(s) => s,
Err(_) => return Err(Error::IndexSerializationError),
};
tokio::fs::write(&self.index_file_path, &serialized[..])
.await
.map_err(|e| Error::Io(self.index_file_path.clone(), e))?;
Ok(())
}
async fn try_restore_index(&self) -> Result<bool, Error> {
match tokio::fs::try_exists(&self.index_file_path).await {
Ok(true) => (),
Ok(false) => return Ok(false),
Err(e) => return Err(Error::Io(self.index_file_path.clone(), e)),
};
let serialized = tokio::fs::read(&self.index_file_path)
.await
.map_err(|e| Error::Io(self.index_file_path.clone(), e))?;
let index = match bitcode::deserialize(&serialized[..]) {
Ok(i) => i,
Err(_) => return Err(Error::IndexDeserializationError),
};
self.replace_index(index).await;
Ok(true)
}
/// Lists the files/directories directly under `virtual_path` in the collection.
pub async fn browse(&self, virtual_path: PathBuf) -> Result<Vec<browser::File>, Error> {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            index.browser.browse(&index.dictionary, virtual_path)
        }
    })
    .await
    .unwrap()
}
/// Recursively lists every song path under `virtual_path`.
pub async fn flatten(&self, virtual_path: PathBuf) -> Result<Vec<PathBuf>, Error> {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            index.browser.flatten(&index.dictionary, virtual_path)
        }
    })
    .await
    .unwrap()
}
/// Returns headers for every genre in the collection.
pub async fn get_genres(&self) -> Vec<GenreHeader> {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            index.collection.get_genres(&index.dictionary)
        }
    })
    .await
    .unwrap()
}
pub async fn get_genre(&self, name: String) -> Result<Genre, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
let name = index
.dictionary
.get(&name)
.ok_or_else(|| Error::GenreNotFound)?;
let genre_key = GenreKey(name);
index
.collection
.get_genre(&index.dictionary, genre_key)
.ok_or_else(|| Error::GenreNotFound)
}
})
.await
.unwrap()
}
/// Returns headers for every album in the collection.
pub async fn get_albums(&self) -> Vec<AlbumHeader> {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            index.collection.get_albums(&index.dictionary)
        }
    })
    .await
    .unwrap()
}
pub async fn get_artists(&self) -> Vec<ArtistHeader> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
index.collection.get_artists(&index.dictionary)
}
})
.await
.unwrap()
}
pub async fn get_artist(&self, name: String) -> Result<Artist, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
let name = index
.dictionary
.get(name)
.ok_or_else(|| Error::ArtistNotFound)?;
let artist_key = ArtistKey(name);
index
.collection
.get_artist(&index.dictionary, artist_key)
.ok_or_else(|| Error::ArtistNotFound)
}
})
.await
.unwrap()
}
pub async fn get_album(&self, artists: Vec<String>, name: String) -> Result<Album, Error> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
let name = index
.dictionary
.get(&name)
.ok_or_else(|| Error::AlbumNotFound)?;
let album_key = AlbumKey {
artists: artists
.into_iter()
.filter_map(|a| index.dictionary.get(a))
.map(|k| ArtistKey(k))
.collect(),
name,
};
index
.collection
.get_album(&index.dictionary, album_key)
.ok_or_else(|| Error::AlbumNotFound)
}
})
.await
.unwrap()
}
/// Returns `count` albums starting at `offset`, in a shuffled order
/// controlled by `seed`.
///
/// NOTE(review): this never returns `Err`; the `Result` presumably keeps the
/// signature uniform with the other accessors — confirm before relying on it.
pub async fn get_random_albums(
    &self,
    seed: Option<u64>,
    offset: usize,
    count: usize,
) -> Result<Vec<Album>, Error> {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            Ok(index
                .collection
                .get_random_albums(&index.dictionary, seed, offset, count))
        }
    })
    .await
    .unwrap()
}

/// Returns `count` recently added albums starting at `offset`.
///
/// NOTE(review): also infallible in practice, same as `get_random_albums`.
pub async fn get_recent_albums(
    &self,
    offset: usize,
    count: usize,
) -> Result<Vec<Album>, Error> {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            Ok(index
                .collection
                .get_recent_albums(&index.dictionary, offset, count))
        }
    })
    .await
    .unwrap()
}
pub async fn get_songs(&self, virtual_paths: Vec<PathBuf>) -> Vec<Result<Song, Error>> {
spawn_blocking({
let index_manager = self.clone();
move || {
let index = index_manager.index.read().unwrap();
virtual_paths
.into_iter()
.map(|p| {
p.get(&index.dictionary)
.and_then(|virtual_path| {
let key = SongKey { virtual_path };
index.collection.get_song(&index.dictionary, key)
})
.ok_or_else(|| Error::SongNotFound)
})
.collect()
}
})
.await
.unwrap()
}
/// Finds all songs matching `query`, sorted in collection order.
pub async fn search(&self, query: String) -> Result<Vec<Song>, Error> {
    spawn_blocking({
        let index_manager = self.clone();
        move || {
            let index = index_manager.index.read().unwrap();
            index
                .search
                .find_songs(&index.collection, &index.dictionary, &query)
        }
    })
    .await
    .unwrap()
}
}
/// A fully built, immutable snapshot of the music collection.
#[derive(Serialize, Deserialize)]
pub struct Index {
    // Interned strings shared by all sub-indexes.
    pub dictionary: dictionary::Dictionary,
    // Directory-tree view of the library.
    pub browser: browser::Browser,
    // Genre / artist / album views.
    pub collection: collection::Collection,
    // Full-text search index.
    pub search: search::Search,
}

impl Default for Index {
    // NOTE(review): all fields are Default; this impl is equivalent to
    // `#[derive(Default)]` on the struct.
    fn default() -> Self {
        Self {
            dictionary: Default::default(),
            browser: Default::default(),
            collection: Default::default(),
            search: Default::default(),
        }
    }
}
/// Accumulates scanned directories and songs, then assembles the final [`Index`].
#[derive(Clone)]
pub struct Builder {
    dictionary_builder: dictionary::Builder,
    browser_builder: browser::Builder,
    collection_builder: collection::Builder,
    search_builder: search::Builder,
}

impl Builder {
    /// Creates an empty builder.
    pub fn new() -> Self {
        Self {
            dictionary_builder: Default::default(),
            browser_builder: Default::default(),
            collection_builder: Default::default(),
            search_builder: Default::default(),
        }
    }

    /// Registers a scanned directory with the browser hierarchy.
    pub fn add_directory(&mut self, directory: scanner::Directory) {
        self.browser_builder
            .add_directory(&mut self.dictionary_builder, directory);
    }

    /// Registers a scanned song with every sub-index.
    pub fn add_song(&mut self, scanner_song: scanner::Song) {
        // Songs that cannot be converted to storage form are skipped.
        let Some(storage_song) = store_song(&mut self.dictionary_builder, &scanner_song) else {
            return;
        };
        self.browser_builder
            .add_song(&mut self.dictionary_builder, &scanner_song);
        self.collection_builder.add_song(&storage_song);
        self.search_builder.add_song(&scanner_song, &storage_song);
    }

    /// Finalizes every sub-builder into an immutable [`Index`].
    pub fn build(self) -> Index {
        Index {
            dictionary: self.dictionary_builder.build(),
            browser: self.browser_builder.build(),
            collection: self.collection_builder.build(),
            search: self.search_builder.build(),
        }
    }
}

impl Default for Builder {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod test {
    use crate::{
        app::{index, test},
        test_name,
    };

    // Round-trip: restoring fails before a persist, succeeds after one.
    #[tokio::test]
    async fn can_persist_index() {
        let ctx = test::ContextBuilder::new(test_name!()).build().await;
        assert_eq!(ctx.index_manager.try_restore_index().await.unwrap(), false);
        let index = index::Builder::new().build();
        ctx.index_manager.persist_index(&index).await.unwrap();
        assert_eq!(ctx.index_manager.try_restore_index().await.unwrap(), true);
    }
}

389
src/app/index/browser.rs Normal file
View file

@ -0,0 +1,389 @@
use std::{
cmp::Ordering,
collections::{BTreeSet, HashMap},
ffi::OsStr,
hash::Hash,
path::{Path, PathBuf},
};
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use tinyvec::TinyVec;
use trie_rs::{Trie, TrieBuilder};
use crate::app::index::{
dictionary::{self, Dictionary},
storage::{self, PathKey},
InternPath,
};
use crate::app::{scanner, Error};
/// A browsable entry: either a sub-directory or a song, identified by its
/// virtual path.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum File {
    Directory(PathBuf),
    Song(PathBuf),
}

/// Directory-tree view of the music collection.
#[derive(Serialize, Deserialize)]
pub struct Browser {
    // Each directory mapped to its direct children.
    directories: HashMap<PathKey, BTreeSet<storage::File>>,
    // Every song path as a sequence of interned components, enabling fast
    // prefix (recursive listing) queries.
    flattened: Trie<lasso2::Spur>,
}

impl Default for Browser {
    fn default() -> Self {
        Self {
            directories: HashMap::default(),
            // TrieBuilder has no Default; build an empty trie explicitly.
            flattened: TrieBuilder::new().build(),
        }
    }
}
impl Browser {
    /// Lists the entries directly under `virtual_path`: directories first,
    /// each group sorted with a locale-aware collator.
    ///
    /// When the root contains exactly one directory, that directory's content
    /// is returned instead, skipping the redundant level.
    ///
    /// # Errors
    /// Returns [`Error::DirectoryNotFound`] when the path is not indexed.
    pub fn browse<P: AsRef<Path>>(
        &self,
        dictionary: &Dictionary,
        virtual_path: P,
    ) -> Result<Vec<File>, Error> {
        let path = virtual_path
            .as_ref()
            .get(dictionary)
            .ok_or_else(|| Error::DirectoryNotFound(virtual_path.as_ref().to_owned()))?;

        let Some(files) = self.directories.get(&path) else {
            return Err(Error::DirectoryNotFound(virtual_path.as_ref().to_owned()));
        };

        // Resolve interned keys back into concrete paths.
        let mut files = files
            .iter()
            .map(|f| {
                let path = match f {
                    storage::File::Directory(p) => p,
                    storage::File::Song(p) => p,
                };
                let path = Path::new(OsStr::new(dictionary.resolve(&path.0))).to_owned();
                match f {
                    storage::File::Directory(_) => File::Directory(path),
                    storage::File::Song(_) => File::Song(path),
                }
            })
            .collect::<Vec<_>>();

        // Skip a redundant top level that holds a single directory.
        if virtual_path.as_ref().parent().is_none() {
            if let [File::Directory(ref p)] = files[..] {
                return self.browse(dictionary, p);
            }
        }

        let collator = dictionary::make_collator();
        files.sort_by(|a, b| {
            // Directories sort before songs; within a group, collate by name.
            let (a, b) = match (a, b) {
                (File::Directory(_), File::Song(_)) => return Ordering::Less,
                (File::Song(_), File::Directory(_)) => return Ordering::Greater,
                (File::Directory(a), File::Directory(b)) => (a, b),
                (File::Song(a), File::Song(b)) => (a, b),
            };
            collator.compare(
                a.as_os_str().to_string_lossy().as_ref(),
                b.as_os_str().to_string_lossy().as_ref(),
            )
        });

        Ok(files)
    }

    /// Returns every song under `virtual_path`, recursively, sorted by path.
    ///
    /// # Errors
    /// Returns [`Error::DirectoryNotFound`] when the path is not a prefix of
    /// any indexed song.
    pub fn flatten<P: AsRef<Path>>(
        &self,
        dictionary: &Dictionary,
        virtual_path: P,
    ) -> Result<Vec<PathBuf>, Error> {
        let path_components = virtual_path
            .as_ref()
            .components()
            .map(|c| c.as_os_str().to_str().unwrap_or_default())
            .filter_map(|c| dictionary.get(c))
            .collect::<Vec<_>>();

        if !self.flattened.is_prefix(&path_components) {
            return Err(Error::DirectoryNotFound(virtual_path.as_ref().to_owned()));
        }

        let mut results: Vec<TinyVec<[_; 8]>> = self
            .flattened
            .predictive_search(path_components)
            .collect::<Vec<_>>();

        // Sort lexicographically by component using the dictionary's
        // precomputed collation ranks; ties broken by path depth.
        results.par_sort_unstable_by(|a, b| {
            for (x, y) in a.iter().zip(b.iter()) {
                match dictionary.cmp(x, y) {
                    Ordering::Equal => continue,
                    // `ordering @ _` simplified to `ordering` (clippy: redundant_pattern).
                    ordering => return ordering,
                }
            }
            a.len().cmp(&b.len())
        });

        let files = results
            .into_iter()
            .map(|c: TinyVec<[_; 8]>| -> PathBuf {
                c.into_iter()
                    .map(|s| dictionary.resolve(&s))
                    .collect::<TinyVec<[&str; 8]>>()
                    .join(std::path::MAIN_SEPARATOR_STR)
                    .into()
            })
            .collect::<Vec<_>>();

        Ok(files)
    }
}
/// Incrementally assembles a [`Browser`] during a library scan.
#[derive(Clone, Default)]
pub struct Builder {
    directories: HashMap<PathKey, BTreeSet<storage::File>>,
    flattened: TrieBuilder<lasso2::Spur>,
}

impl Builder {
    /// Registers a directory and links it into its parent's child set.
    pub fn add_directory(
        &mut self,
        dictionary_builder: &mut dictionary::Builder,
        directory: scanner::Directory,
    ) {
        // Paths that cannot be interned are skipped entirely.
        let Some(virtual_path) = (&directory.virtual_path).get_or_intern(dictionary_builder) else {
            return;
        };
        let Some(virtual_parent) = directory
            .virtual_path
            .parent()
            .and_then(|p| p.get_or_intern(dictionary_builder))
        else {
            return;
        };
        // Make sure the directory has an entry even if it stays empty.
        self.directories.entry(virtual_path).or_default();
        self.directories
            .entry(virtual_parent)
            .or_default()
            .insert(storage::File::Directory(virtual_path));
    }

    /// Registers a song under its parent directory and in the flattened trie.
    pub fn add_song(&mut self, dictionary_builder: &mut dictionary::Builder, song: &scanner::Song) {
        let Some(virtual_path) = (&song.virtual_path).get_or_intern(dictionary_builder) else {
            return;
        };
        let Some(virtual_parent) = song
            .virtual_path
            .parent()
            .and_then(|p| p.get_or_intern(dictionary_builder))
        else {
            return;
        };
        self.directories
            .entry(virtual_parent)
            .or_default()
            .insert(storage::File::Song(virtual_path));
        self.flattened.push(
            song.virtual_path
                .components()
                // NOTE(review): `unwrap` panics on non-UTF-8 path components;
                // presumably guarded by the successful interning above — confirm.
                .map(|c| dictionary_builder.get_or_intern(c.as_os_str().to_str().unwrap()))
                .collect::<TinyVec<[lasso2::Spur; 8]>>(),
        );
    }

    /// Finalizes the builder into an immutable [`Browser`].
    pub fn build(self) -> Browser {
        Browser {
            directories: self.directories,
            flattened: self.flattened.build(),
        }
    }
}
#[cfg(test)]
mod test {
use std::collections::HashSet;
use std::path::PathBuf;
use super::*;
fn setup_test(songs: HashSet<PathBuf>) -> (Browser, Dictionary) {
let mut dictionary_builder = dictionary::Builder::default();
let mut builder = Builder::default();
let directories = songs
.iter()
.flat_map(|k| k.parent().unwrap().ancestors())
.collect::<HashSet<_>>();
for directory in directories {
builder.add_directory(
&mut dictionary_builder,
scanner::Directory {
virtual_path: directory.to_owned(),
},
);
}
for path in songs {
let mut song = scanner::Song::default();
song.virtual_path = path.clone();
builder.add_song(&mut dictionary_builder, &song);
}
let browser = builder.build();
let dictionary = dictionary_builder.build();
(browser, dictionary)
}
#[test]
fn can_browse_top_level() {
let (browser, strings) = setup_test(HashSet::from([
PathBuf::from_iter(["Music", "Iron Maiden", "Moonchild.mp3"]),
PathBuf::from_iter(["Also Music", "Iron Maiden", "The Prisoner.mp3"]),
]));
let files = browser.browse(&strings, PathBuf::new()).unwrap();
assert_eq!(
files[..],
[
File::Directory(PathBuf::from_iter(["Also Music"])),
File::Directory(PathBuf::from_iter(["Music"])),
]
);
}
#[test]
fn browse_skips_redundant_top_level() {
let (browser, strings) = setup_test(HashSet::from([PathBuf::from_iter([
"Music",
"Iron Maiden",
"Moonchild.mp3",
])]));
let files = browser.browse(&strings, PathBuf::new()).unwrap();
assert_eq!(
files[..],
[File::Directory(PathBuf::from_iter([
"Music",
"Iron Maiden"
])),]
);
}
#[test]
fn can_browse_directory() {
let artist_directory = PathBuf::from_iter(["Music", "Iron Maiden"]);
let (browser, strings) = setup_test(HashSet::from([
artist_directory.join("Infinite Dreams.mp3"),
artist_directory.join("Moonchild.mp3"),
]));
let files = browser.browse(&strings, artist_directory.clone()).unwrap();
assert_eq!(
files,
[
File::Song(artist_directory.join("Infinite Dreams.mp3")),
File::Song(artist_directory.join("Moonchild.mp3"))
]
);
}
#[test]
fn browse_entries_are_sorted() {
let (browser, strings) = setup_test(HashSet::from([
PathBuf::from_iter(["Ott", "Mir.mp3"]),
PathBuf::from("Helios.mp3"),
PathBuf::from("asura.mp3"),
PathBuf::from("à la maison.mp3"),
]));
let files = browser.browse(&strings, PathBuf::new()).unwrap();
assert_eq!(
files,
[
File::Directory(PathBuf::from("Ott")),
File::Song(PathBuf::from("à la maison.mp3")),
File::Song(PathBuf::from("asura.mp3")),
File::Song(PathBuf::from("Helios.mp3")),
]
);
}
#[test]
fn can_flatten_root() {
let song_a = PathBuf::from_iter(["Music", "Electronic", "Papua New Guinea.mp3"]);
let song_b = PathBuf::from_iter(["Music", "Metal", "Destiny.mp3"]);
let song_c = PathBuf::from_iter(["Music", "Metal", "No Turning Back.mp3"]);
let (browser, strings) = setup_test(HashSet::from([
song_a.clone(),
song_b.clone(),
song_c.clone(),
]));
let files = browser.flatten(&strings, PathBuf::new()).unwrap();
assert_eq!(files, [song_a, song_b, song_c]);
}
#[test]
fn can_flatten_directory() {
let electronic = PathBuf::from_iter(["Music", "Electronic"]);
let song_a = electronic.join(PathBuf::from_iter(["FSOL", "Papua New Guinea.mp3"]));
let song_b = electronic.join(PathBuf::from_iter(["Kraftwerk", "Autobahn.mp3"]));
let song_c = PathBuf::from_iter(["Music", "Metal", "Destiny.mp3"]);
let (browser, strings) = setup_test(HashSet::from([
song_a.clone(),
song_b.clone(),
song_c.clone(),
]));
let files = browser.flatten(&strings, electronic).unwrap();
assert_eq!(files, [song_a, song_b]);
}
#[test]
fn flatten_entries_are_sorted() {
let (browser, strings) = setup_test(HashSet::from([
PathBuf::from_iter(["Ott", "Mir.mp3"]),
PathBuf::from("Helios.mp3"),
PathBuf::from("à la maison.mp3.mp3"),
PathBuf::from("asura.mp3"),
]));
let files = browser.flatten(&strings, PathBuf::new()).unwrap();
assert_eq!(
files,
[
PathBuf::from("à la maison.mp3.mp3"),
PathBuf::from("asura.mp3"),
PathBuf::from("Helios.mp3"),
PathBuf::from_iter(["Ott", "Mir.mp3"]),
]
);
}
#[test]
fn can_flatten_directory_with_shared_prefix() {
let directory_a = PathBuf::from_iter(["Music", "Therion", "Leviathan II"]);
let directory_b = PathBuf::from_iter(["Music", "Therion", "Leviathan III"]);
let song_a = directory_a.join("Pazuzu.mp3");
let song_b = directory_b.join("Ninkigal.mp3");
let (browser, strings) = setup_test(HashSet::from([song_a.clone(), song_b.clone()]));
let files = browser.flatten(&strings, directory_a).unwrap();
assert_eq!(files, [song_a]);
}
}

1116
src/app/index/collection.rs Normal file

File diff suppressed because it is too large Load diff

110
src/app/index/dictionary.rs Normal file
View file

@ -0,0 +1,110 @@
use std::{cmp::Ordering, collections::HashMap};
use icu_collator::{Collator, CollatorOptions, Strength};
use lasso2::{Rodeo, RodeoReader, Spur};
use rayon::slice::ParallelSliceMut;
use serde::{Deserialize, Serialize};
/// Normalizes a string for matching: strips separator characters (spaces,
/// underscores, hyphens, apostrophes) and lowercases the remainder.
pub fn sanitize(s: &str) -> String {
    // TODO merge inconsistent diacritic usage
    let cleaned: String = s
        .chars()
        .filter(|c| !matches!(c, ' ' | '_' | '-' | '\''))
        .collect();
    cleaned.to_lowercase()
}
/// Builds the collator used for user-facing sorting. Secondary strength
/// considers base letters and accents but ignores case differences.
pub fn make_collator() -> Collator {
    let mut options = CollatorOptions::new();
    options.strength = Some(Strength::Secondary);
    Collator::try_new(&Default::default(), options).unwrap()
}
/// Read-only interned-string table with precomputed collation order.
#[derive(Serialize, Deserialize)]
pub struct Dictionary {
    strings: RodeoReader,          // Interned strings
    canon: HashMap<String, Spur>,  // Canonical representation of similar strings
    sort_keys: HashMap<Spur, u32>, // All spurs sorted against each other
}
impl Dictionary {
    /// Returns the spur for an exactly matching interned string, if any.
    pub fn get<S: AsRef<str>>(&self, string: S) -> Option<Spur> {
        self.strings.get(string)
    }

    /// Returns the canonical spur for any string that sanitizes to a known form.
    pub fn get_canon<S: AsRef<str>>(&self, string: S) -> Option<Spur> {
        let key = sanitize(string.as_ref());
        self.canon.get(&key).copied()
    }

    /// Returns the string an interned spur stands for.
    pub fn resolve(&self, spur: &Spur) -> &str {
        self.strings.resolve(spur)
    }

    /// Compares two interned strings via their precomputed collation ranks.
    /// Unknown spurs rank as 0.
    pub fn cmp(&self, a: &Spur, b: &Spur) -> Ordering {
        let rank = |s: &Spur| self.sort_keys.get(s).copied().unwrap_or_default();
        rank(a).cmp(&rank(b))
    }
}
impl Default for Dictionary {
    fn default() -> Self {
        Self {
            // An empty interner converted to its read-only form.
            strings: Rodeo::default().into_reader(),
            canon: Default::default(),
            sort_keys: Default::default(),
        }
    }
}
/// Accumulates interned strings during a scan and bakes sort keys on build.
#[derive(Clone, Default)]
pub struct Builder {
    strings: Rodeo,
    canon: HashMap<String, Spur>,
}

impl Builder {
    /// Freezes the interner and computes a collation rank for every string.
    pub fn build(self) -> Dictionary {
        let mut sorted_spurs = self.strings.iter().collect::<Vec<_>>();
        // TODO this is too slow!
        // NOTE(review): a fresh collator is built for every comparison; that
        // is the likely bottleneck. Hoisting it requires the collator to be
        // shareable across rayon worker threads — confirm before changing.
        sorted_spurs.par_sort_unstable_by(|(_, a), (_, b)| {
            let collator = make_collator();
            collator.compare(a, b)
        });
        // Rank of each spur within the fully sorted order.
        let sort_keys = sorted_spurs
            .into_iter()
            .enumerate()
            .map(|(i, (spur, _))| (spur, i as u32))
            .collect();
        Dictionary {
            strings: self.strings.into_reader(),
            canon: self.canon,
            sort_keys,
        }
    }

    /// Interns `string` verbatim and returns its spur.
    pub fn get_or_intern<S: AsRef<str>>(&mut self, string: S) -> Spur {
        self.strings.get_or_intern(string)
    }

    /// Interns `string` under its sanitized (canonical) form.
    ///
    /// Returns `None` when the string sanitizes to nothing. All strings
    /// sharing a sanitized form resolve to the spur of the first one seen.
    pub fn get_or_intern_canon<S: AsRef<str>>(&mut self, string: S) -> Option<Spur> {
        let cleaned = sanitize(string.as_ref());
        match cleaned.is_empty() {
            true => None,
            false => Some(
                self.canon
                    .entry(cleaned)
                    .or_insert_with(|| self.strings.get_or_intern(string.as_ref()))
                    .to_owned(),
            ),
        }
    }
}

478
src/app/index/query.rs Normal file
View file

@ -0,0 +1,478 @@
use std::collections::HashSet;
use chumsky::{
error::Simple,
prelude::{choice, end, filter, just, none_of, recursive},
text::{int, keyword, whitespace, TextParser},
Parser,
};
use enum_map::Enum;
use serde::{Deserialize, Serialize};
/// Text-valued song attributes usable in a query.
#[derive(Clone, Copy, Debug, Deserialize, Enum, Eq, Hash, PartialEq, Serialize)]
pub enum TextField {
    Album,
    AlbumArtist,
    Artist,
    Composer,
    Genre,
    Label,
    Lyricist,
    Path,
    Title,
}

/// Text comparison operators: `=` (exact) and `%` (substring).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum TextOp {
    Eq,
    Like,
}

/// Numeric song attributes usable in a query.
#[derive(Clone, Copy, Debug, Deserialize, Enum, Eq, Hash, PartialEq, Serialize)]
pub enum NumberField {
    DiscNumber,
    TrackNumber,
    Year,
}

/// Numeric comparison operators (`=`, `>`, `>=`, `<`, `<=`).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum NumberOp {
    Eq,
    Greater,
    GreaterOrEq,
    Less,
    LessOrEq,
}

/// A bare query token: quoted/raw text or an integer.
#[derive(Debug, Eq, PartialEq)]
pub enum Literal {
    Text(String),
    Number(i32),
}

/// Operators joining two sub-queries: `&&`, `||`, and `!!` (and-not).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum BoolOp {
    And,
    Or,
    Not,
}

/// Parsed query AST.
#[derive(Debug, Eq, PartialEq)]
pub enum Expr {
    // A bare literal, matched against any field.
    Fuzzy(Literal),
    TextCmp(TextField, TextOp, String),
    NumberCmp(NumberField, NumberOp, i32),
    Combined(Box<Expr>, BoolOp, Box<Expr>),
}
/// Builds the search query parser.
///
/// Grammar, loosely: an expression is a sequence of atoms joined by `&&`,
/// `||`, `!!`, or plain whitespace (implicit AND); an atom is a field
/// comparison (`album % foo`, `year >= 2000`), a bare literal (fuzzy match),
/// or a parenthesized expression. All boolean operators share a single
/// precedence level and associate left-to-right.
pub fn make_parser() -> impl Parser<char, Expr, Error = Simple<char>> {
    recursive(|expr| {
        // "quoted string": anything but a double quote, between quotes.
        let quoted_str = just('"')
            .ignore_then(none_of('"').repeated().collect::<String>())
            .then_ignore(just('"'));

        // Unquoted string: runs until whitespace or a reserved symbol.
        let symbols = r#"()<>"|&=!"#.chars().collect::<HashSet<_>>();
        let raw_str = filter(move |c: &char| !c.is_whitespace() && !symbols.contains(c))
            .repeated()
            .at_least(1)
            .collect::<String>();

        let str_ = choice((quoted_str, raw_str)).padded();
        let number = int(10).from_str().unwrapped().padded();

        let text_field = choice((
            keyword("album").to(TextField::Album),
            keyword("albumartist").to(TextField::AlbumArtist),
            keyword("artist").to(TextField::Artist),
            keyword("composer").to(TextField::Composer),
            keyword("genre").to(TextField::Genre),
            keyword("label").to(TextField::Label),
            keyword("lyricist").to(TextField::Lyricist),
            keyword("path").to(TextField::Path),
            keyword("title").to(TextField::Title),
        ))
        .padded();

        let text_op = choice((just("=").to(TextOp::Eq), just("%").to(TextOp::Like))).padded();

        let text_cmp = text_field
            .then(text_op)
            .then(str_.clone())
            .map(|((a, b), c)| Expr::TextCmp(a, b, c));

        let number_field = choice((
            keyword("discnumber").to(NumberField::DiscNumber),
            keyword("tracknumber").to(NumberField::TrackNumber),
            keyword("year").to(NumberField::Year),
        ))
        .padded();

        // `>=`/`<=` are listed before `>`/`<` so the two-character operators
        // are not split by the shorter alternatives.
        let number_op = choice((
            just("=").to(NumberOp::Eq),
            just(">=").to(NumberOp::GreaterOrEq),
            just(">").to(NumberOp::Greater),
            just("<=").to(NumberOp::LessOrEq),
            just("<").to(NumberOp::Less),
        ))
        .padded();

        let number_cmp = number_field
            .then(number_op)
            .then(number)
            .map(|((a, b), c)| Expr::NumberCmp(a, b, c));

        // Bare tokens parse as numeric literals first, then fall back to text.
        let literal = choice((number.map(Literal::Number), str_.map(Literal::Text)));
        let fuzzy = literal.map(Expr::Fuzzy);

        let filter = choice((text_cmp, number_cmp, fuzzy));
        let atom = choice((filter, expr.delimited_by(just('('), just(')'))));

        let bool_op = choice((
            just("&&").to(BoolOp::And),
            just("||").to(BoolOp::Or),
            just("!!").to(BoolOp::Not),
        ))
        .padded();

        // Explicit operators fold left at a single precedence level.
        let combined = atom
            .clone()
            .then(bool_op.then(atom).repeated())
            .foldl(|a, (b, c)| Expr::Combined(Box::new(a), b, Box::new(c)));

        // Adjacent expressions with no operator between them are AND-ed.
        let implicit_and = combined
            .clone()
            .then(whitespace().ignore_then(combined).repeated())
            .foldl(|a: Expr, b: Expr| Expr::Combined(Box::new(a), BoolOp::And, Box::new(b)));

        implicit_and
    })
    .then_ignore(end())
}
#[test]
fn can_parse_fuzzy_query() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"rhapsody"#).unwrap(),
Expr::Fuzzy(Literal::Text("rhapsody".to_owned())),
);
assert_eq!(
parser.parse(r#"2005"#).unwrap(),
Expr::Fuzzy(Literal::Number(2005)),
);
}
#[test]
fn can_repeat_fuzzy_queries() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"rhapsody "of victory""#).unwrap(),
Expr::Combined(
Box::new(Expr::Fuzzy(Literal::Text("rhapsody".to_owned()))),
BoolOp::And,
Box::new(Expr::Fuzzy(Literal::Text("of victory".to_owned()))),
),
);
}
#[test]
fn can_mix_fuzzy_and_structured() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"rhapsody album % dragonflame"#).unwrap(),
Expr::Combined(
Box::new(Expr::Fuzzy(Literal::Text("rhapsody".to_owned()))),
BoolOp::And,
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"dragonflame".to_owned()
)),
),
);
}
#[test]
fn can_parse_text_fields() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"album = "legendary tales""#).unwrap(),
Expr::TextCmp(TextField::Album, TextOp::Eq, "legendary tales".to_owned()),
);
assert_eq!(
parser.parse(r#"albumartist = "rhapsody""#).unwrap(),
Expr::TextCmp(TextField::AlbumArtist, TextOp::Eq, "rhapsody".to_owned()),
);
assert_eq!(
parser.parse(r#"artist = "rhapsody""#).unwrap(),
Expr::TextCmp(TextField::Artist, TextOp::Eq, "rhapsody".to_owned()),
);
assert_eq!(
parser.parse(r#"composer = "yoko kanno""#).unwrap(),
Expr::TextCmp(TextField::Composer, TextOp::Eq, "yoko kanno".to_owned()),
);
assert_eq!(
parser.parse(r#"genre = "jazz""#).unwrap(),
Expr::TextCmp(TextField::Genre, TextOp::Eq, "jazz".to_owned()),
);
assert_eq!(
parser.parse(r#"label = "diverse system""#).unwrap(),
Expr::TextCmp(TextField::Label, TextOp::Eq, "diverse system".to_owned()),
);
assert_eq!(
parser.parse(r#"lyricist = "dalida""#).unwrap(),
Expr::TextCmp(TextField::Lyricist, TextOp::Eq, "dalida".to_owned()),
);
assert_eq!(
parser.parse(r#"path = "electronic/big beat""#).unwrap(),
Expr::TextCmp(
TextField::Path,
TextOp::Eq,
"electronic/big beat".to_owned()
),
);
assert_eq!(
parser.parse(r#"title = "emerald sword""#).unwrap(),
Expr::TextCmp(TextField::Title, TextOp::Eq, "emerald sword".to_owned()),
);
}
#[test]
fn can_parse_text_operators() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"album = "legendary tales""#).unwrap(),
Expr::TextCmp(TextField::Album, TextOp::Eq, "legendary tales".to_owned()),
);
assert_eq!(
parser.parse(r#"album % "legendary tales""#).unwrap(),
Expr::TextCmp(TextField::Album, TextOp::Like, "legendary tales".to_owned()),
);
}
#[test]
fn can_parse_number_fields() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"discnumber = 6"#).unwrap(),
Expr::NumberCmp(NumberField::DiscNumber, NumberOp::Eq, 6),
);
assert_eq!(
parser.parse(r#"tracknumber = 12"#).unwrap(),
Expr::NumberCmp(NumberField::TrackNumber, NumberOp::Eq, 12),
);
assert_eq!(
parser.parse(r#"year = 1999"#).unwrap(),
Expr::NumberCmp(NumberField::Year, NumberOp::Eq, 1999),
);
}
#[test]
fn can_parse_number_operators() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"discnumber = 6"#).unwrap(),
Expr::NumberCmp(NumberField::DiscNumber, NumberOp::Eq, 6),
);
assert_eq!(
parser.parse(r#"discnumber > 6"#).unwrap(),
Expr::NumberCmp(NumberField::DiscNumber, NumberOp::Greater, 6),
);
assert_eq!(
parser.parse(r#"discnumber >= 6"#).unwrap(),
Expr::NumberCmp(NumberField::DiscNumber, NumberOp::GreaterOrEq, 6),
);
assert_eq!(
parser.parse(r#"discnumber < 6"#).unwrap(),
Expr::NumberCmp(NumberField::DiscNumber, NumberOp::Less, 6),
);
assert_eq!(
parser.parse(r#"discnumber <= 6"#).unwrap(),
Expr::NumberCmp(NumberField::DiscNumber, NumberOp::LessOrEq, 6),
);
}
#[test]
fn can_use_and_operator() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"album % lands && title % "sword""#).unwrap(),
Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"lands".to_owned()
)),
BoolOp::And,
Box::new(Expr::TextCmp(
TextField::Title,
TextOp::Like,
"sword".to_owned()
))
),
);
}
#[test]
fn can_use_or_operator() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"album % lands || title % "sword""#).unwrap(),
Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"lands".to_owned()
)),
BoolOp::Or,
Box::new(Expr::TextCmp(
TextField::Title,
TextOp::Like,
"sword".to_owned()
))
),
);
}
#[test]
fn can_use_not_operator() {
let parser = make_parser();
assert_eq!(
parser.parse(r#"album % lands !! title % "sword""#).unwrap(),
Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"lands".to_owned()
)),
BoolOp::Not,
Box::new(Expr::TextCmp(
TextField::Title,
TextOp::Like,
"sword".to_owned()
))
),
);
}
#[test]
fn boolean_operators_share_precedence() {
let parser = make_parser();
assert_eq!(
parser
.parse(r#"album % lands || album % tales && title % "sword""#)
.unwrap(),
Expr::Combined(
Box::new(Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"lands".to_owned()
)),
BoolOp::Or,
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"tales".to_owned()
))
)),
BoolOp::And,
Box::new(Expr::TextCmp(
TextField::Title,
TextOp::Like,
"sword".to_owned()
))
),
);
assert_eq!(
parser
.parse(r#"album % lands && album % tales || title % "sword""#)
.unwrap(),
Expr::Combined(
Box::new(Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"lands".to_owned()
)),
BoolOp::And,
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"tales".to_owned()
))
)),
BoolOp::Or,
Box::new(Expr::TextCmp(
TextField::Title,
TextOp::Like,
"sword".to_owned()
))
),
);
}
#[test]
fn can_use_parenthesis_for_precedence() {
let parser = make_parser();
assert_eq!(
parser
.parse(r#"album % lands || (album % tales && title % sword)"#)
.unwrap(),
Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"lands".to_owned()
)),
BoolOp::Or,
Box::new(Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"tales".to_owned()
)),
BoolOp::And,
Box::new(Expr::TextCmp(
TextField::Title,
TextOp::Like,
"sword".to_owned()
)),
))
),
);
assert_eq!(
parser
.parse(r#"(album % lands || album % tales) && title % "sword""#)
.unwrap(),
Expr::Combined(
Box::new(Expr::Combined(
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"lands".to_owned()
)),
BoolOp::Or,
Box::new(Expr::TextCmp(
TextField::Album,
TextOp::Like,
"tales".to_owned()
))
)),
BoolOp::And,
Box::new(Expr::TextCmp(
TextField::Title,
TextOp::Like,
"sword".to_owned()
))
),
);
}

708
src/app/index/search.rs Normal file
View file

@ -0,0 +1,708 @@
use chumsky::Parser;
use enum_map::EnumMap;
use lasso2::Spur;
use nohash_hasher::IntSet;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use tinyvec::TinyVec;
use crate::app::{
index::{
dictionary::Dictionary,
query::{BoolOp, Expr, Literal, NumberField, NumberOp, TextField, TextOp},
storage::SongKey,
},
scanner, Error,
};
use super::{collection, dictionary::sanitize, query::make_parser, storage};
/// Full-text search index: one sub-index per queryable field.
#[derive(Serialize, Deserialize)]
pub struct Search {
    text_fields: EnumMap<TextField, TextFieldIndex>,
    number_fields: EnumMap<NumberField, NumberFieldIndex>,
}

impl Default for Search {
    fn default() -> Self {
        Self {
            text_fields: Default::default(),
            number_fields: Default::default(),
        }
    }
}
impl Search {
    /// Parses `query` and returns all matching songs, sorted like the collection.
    ///
    /// # Errors
    /// Returns [`Error::SearchQueryParseError`] when the query is malformed.
    pub fn find_songs(
        &self,
        collection: &collection::Collection,
        dictionary: &Dictionary,
        query: &str,
    ) -> Result<Vec<collection::Song>, Error> {
        let parser = make_parser();
        let parsed_query = parser
            .parse(query)
            .map_err(|_| Error::SearchQueryParseError)?;

        let mut songs = self
            .eval(dictionary, &parsed_query)
            .into_iter()
            .collect::<Vec<_>>();

        collection.sort_songs(&mut songs, dictionary);

        let songs = songs
            .into_iter()
            .filter_map(|song_key| collection.get_song(dictionary, song_key))
            .collect::<Vec<_>>();

        Ok(songs)
    }

    /// Recursively evaluates a query expression to a set of matching songs.
    fn eval(&self, dictionary: &Dictionary, expr: &Expr) -> IntSet<SongKey> {
        match expr {
            Expr::Fuzzy(s) => self.eval_fuzzy(dictionary, s),
            // `s` (not `&s`): a &String coerces to &str without an extra reference.
            Expr::TextCmp(field, op, s) => self.eval_text_operator(dictionary, *field, *op, s),
            Expr::NumberCmp(field, op, n) => self.eval_number_operator(*field, *op, *n),
            Expr::Combined(e, op, f) => self.combine(dictionary, e, *op, f),
        }
    }

    /// Combines two sub-expressions with a boolean operator.
    ///
    /// Operands too short to be indexed (below bigram size, or numbers with a
    /// single digit) are treated as absent so they cannot blank out results.
    // Takes `&Expr` instead of `&Box<Expr>` (clippy: borrowed_box); the
    // call site in `eval` coerces automatically.
    fn combine(&self, dictionary: &Dictionary, e: &Expr, op: BoolOp, f: &Expr) -> IntSet<SongKey> {
        let is_operable = |expr: &Expr| match expr {
            Expr::Fuzzy(Literal::Text(s)) if s.chars().count() < BIGRAM_SIZE => false,
            Expr::Fuzzy(Literal::Number(n)) if *n < 10 => false,
            Expr::TextCmp(_, _, s) if s.chars().count() < BIGRAM_SIZE => false,
            _ => true,
        };

        let left = is_operable(e).then(|| self.eval(dictionary, e));
        let right = is_operable(f).then(|| self.eval(dictionary, f));

        match (left, op, right) {
            (Some(l), BoolOp::And, Some(r)) => l.intersection(&r).cloned().collect(),
            (Some(l), BoolOp::Or, Some(r)) => l.union(&r).cloned().collect(),
            (Some(l), BoolOp::Not, Some(r)) => l.difference(&r).cloned().collect(),
            (None, BoolOp::Not, _) => IntSet::default(),
            (Some(l), _, None) => l,
            (None, _, Some(r)) => r,
            (None, _, None) => IntSet::default(),
        }
    }

    /// Matches a bare literal against every indexed field.
    fn eval_fuzzy(&self, dictionary: &Dictionary, value: &Literal) -> IntSet<SongKey> {
        match value {
            Literal::Text(s) => {
                let mut songs = IntSet::default();
                for field in self.text_fields.values() {
                    songs.extend(field.find_like(dictionary, s));
                }
                songs
            }
            Literal::Number(n) => {
                // A number can match either a numeric field or its textual form.
                let mut songs = IntSet::default();
                for field in self.number_fields.values() {
                    songs.extend(field.find(*n as i64, NumberOp::Eq));
                }
                songs
                    .union(&self.eval_fuzzy(dictionary, &Literal::Text(n.to_string())))
                    .copied()
                    .collect()
            }
        }
    }

    /// Evaluates `field op value` for a text field.
    fn eval_text_operator(
        &self,
        dictionary: &Dictionary,
        field: TextField,
        operator: TextOp,
        value: &str,
    ) -> IntSet<SongKey> {
        match operator {
            TextOp::Eq => self.text_fields[field].find_exact(dictionary, value),
            TextOp::Like => self.text_fields[field].find_like(dictionary, value),
        }
    }

    /// Evaluates `field op value` for a numeric field.
    fn eval_number_operator(
        &self,
        field: NumberField,
        operator: NumberOp,
        value: i32,
    ) -> IntSet<SongKey> {
        self.number_fields[field].find(value as i64, operator)
    }
}
// Substrings are indexed in overlapping chunks of this many characters.
const BIGRAM_SIZE: usize = 2;
// Size of one axis of the dense ASCII bigram table.
const ASCII_RANGE: usize = u8::MAX as usize;

/// Substring index over one text field.
#[derive(Clone, Deserialize, Serialize)]
struct TextFieldIndex {
    // Canonical value -> songs, serving `=` (exact) queries.
    exact: HashMap<Spur, IntSet<SongKey>>,
    // Dense table indexed by ASCII character pair, serving `%` (like) queries.
    ascii_bigrams: Vec<Vec<(SongKey, Spur)>>,
    // Sparse fallback for bigrams containing non-ASCII characters.
    other_bigrams: HashMap<[char; BIGRAM_SIZE], Vec<(SongKey, Spur)>>,
}

impl Default for TextFieldIndex {
    fn default() -> Self {
        Self {
            exact: Default::default(),
            // Preallocate the full ASCII x ASCII table.
            ascii_bigrams: vec![Default::default(); ASCII_RANGE * ASCII_RANGE],
            other_bigrams: Default::default(),
        }
    }
}
impl TextFieldIndex {
    /// Maps a pair of ASCII characters to its slot in the dense bigram table.
    fn ascii_bigram_to_index(a: char, b: char) -> usize {
        assert!(a.is_ascii());
        assert!(b.is_ascii());
        // (Removed a redundant double `as usize` cast on `b`.)
        (a as usize) * ASCII_RANGE + (b as usize)
    }

    /// Indexes the field text `raw_value` for song `song`.
    ///
    /// `value` is the interned form of the text; it is stored alongside each
    /// bigram so lookups can re-verify full substring matches.
    pub fn insert(&mut self, raw_value: &str, value: Spur, song: SongKey) {
        let characters = sanitize(raw_value).chars().collect::<TinyVec<[char; 32]>>();
        for substring in characters[..].windows(BIGRAM_SIZE) {
            if substring.iter().all(|c| c.is_ascii()) {
                let index = Self::ascii_bigram_to_index(substring[0], substring[1]);
                self.ascii_bigrams[index].push((song, value));
            } else {
                self.other_bigrams
                    .entry(substring.try_into().unwrap())
                    .or_default()
                    .push((song, value));
            }
        }
        self.exact.entry(value).or_default().insert(song);
    }

    /// Returns songs whose indexed value contains `value` as a substring.
    pub fn find_like(&self, dictionary: &Dictionary, value: &str) -> IntSet<SongKey> {
        let sanitized = sanitize(value);
        let characters = sanitized.chars().collect::<Vec<_>>();
        let empty = Vec::new();

        let candidates_by_bigram = characters[..]
            .windows(BIGRAM_SIZE)
            .map(|s| {
                if s.iter().all(|c| c.is_ascii()) {
                    let index = Self::ascii_bigram_to_index(s[0], s[1]);
                    &self.ascii_bigrams[index]
                } else {
                    self.other_bigrams
                        .get::<[char; BIGRAM_SIZE]>(s.try_into().unwrap())
                        .unwrap_or(&empty)
                }
            })
            .collect::<Vec<_>>();

        candidates_by_bigram
            .into_iter()
            .min_by_key(|h| h.len()) // Only check songs that contain the least common bigram from the search term
            .unwrap_or(&empty)
            .iter()
            .filter(|(_song_key, indexed_value)| {
                // Only keep songs that actually contain the search term in full
                let resolved = dictionary.resolve(indexed_value);
                sanitize(resolved).contains(&sanitized)
            })
            .map(|(k, _v)| k)
            .copied()
            .collect()
    }

    /// Returns songs whose indexed value is canonically equal to `value`.
    pub fn find_exact(&self, dictionary: &Dictionary, value: &str) -> IntSet<SongKey> {
        dictionary
            .get_canon(value)
            .and_then(|s| self.exact.get(&s))
            .cloned()
            .unwrap_or_default()
    }
}
#[derive(Clone, Default, Deserialize, Serialize)]
struct NumberFieldIndex {
values: BTreeMap<i64, IntSet<SongKey>>,
}
impl NumberFieldIndex {
	/// Records that the song identified by `key` has this field set to `value`.
	pub fn insert(&mut self, value: i64, key: SongKey) {
		self.values.entry(value).or_default().insert(key);
	}

	/// Returns every song whose indexed value satisfies `operator` against
	/// `value` (e.g. `year > 2000`). Returns an empty set when nothing matches.
	pub fn find(&self, value: i64, operator: NumberOp) -> IntSet<SongKey> {
		let range = match operator {
			NumberOp::Eq => self.values.range(value..=value),
			// `Bound::Excluded` avoids the `value + 1` overflow panic the
			// previous implementation hit for `value == i64::MAX`.
			NumberOp::Greater => self
				.values
				.range((std::ops::Bound::Excluded(value), std::ops::Bound::Unbounded)),
			NumberOp::GreaterOrEq => self.values.range(value..),
			NumberOp::Less => self.values.range(..value),
			NumberOp::LessOrEq => self.values.range(..=value),
		};
		// Flatten all matching buckets straight into the result set, skipping
		// the two intermediate Vec allocations of the previous implementation.
		range
			.flat_map(|(_n, songs)| songs.iter().copied())
			.collect()
	}
}
/// Accumulates per-field indices while the collection is being scanned;
/// `Builder::build` turns the result into an immutable `Search`.
#[derive(Clone, Default)]
pub struct Builder {
	// One substring/exact index per text field (title, artist, …).
	text_fields: EnumMap<TextField, TextFieldIndex>,
	// One range index per numeric field (year, track number, …).
	number_fields: EnumMap<NumberField, NumberFieldIndex>,
}
impl Builder {
	/// Indexes one song. `scanner_song` carries the raw strings read from the
	/// file's tags; `storage_song` carries the same data with strings interned.
	/// The two are zipped field by field, so their multi-value lists are
	/// expected to be parallel (same length, same order).
	pub fn add_song(&mut self, scanner_song: &scanner::Song, storage_song: &storage::Song) {
		let song_key = SongKey {
			virtual_path: storage_song.virtual_path,
		};

		// Optional single-value fields are indexed only when both the raw and
		// interned values are present.
		if let (Some(str), Some(spur)) = (&scanner_song.album, storage_song.album) {
			self.text_fields[TextField::Album].insert(str, spur, song_key);
		}

		for (str, artist_key) in scanner_song
			.album_artists
			.iter()
			.zip(storage_song.album_artists.iter())
		{
			self.text_fields[TextField::AlbumArtist].insert(str, artist_key.0, song_key);
		}

		for (str, artist_key) in scanner_song.artists.iter().zip(storage_song.artists.iter()) {
			self.text_fields[TextField::Artist].insert(str, artist_key.0, song_key);
		}

		for (str, artist_key) in scanner_song
			.composers
			.iter()
			.zip(storage_song.composers.iter())
		{
			self.text_fields[TextField::Composer].insert(str, artist_key.0, song_key);
		}

		if let Some(disc_number) = &scanner_song.disc_number {
			self.number_fields[NumberField::DiscNumber].insert(*disc_number, song_key);
		}

		for (str, spur) in scanner_song.genres.iter().zip(storage_song.genres.iter()) {
			self.text_fields[TextField::Genre].insert(str, *spur, song_key);
		}

		for (str, spur) in scanner_song.labels.iter().zip(storage_song.labels.iter()) {
			self.text_fields[TextField::Label].insert(str, *spur, song_key);
		}

		for (str, artist_key) in scanner_song
			.lyricists
			.iter()
			.zip(storage_song.lyricists.iter())
		{
			self.text_fields[TextField::Lyricist].insert(str, artist_key.0, song_key);
		}

		// The virtual path is always present, so it is indexed unconditionally.
		self.text_fields[TextField::Path].insert(
			scanner_song.virtual_path.to_string_lossy().as_ref(),
			storage_song.virtual_path.0,
			song_key,
		);

		if let (Some(str), Some(spur)) = (&scanner_song.title, storage_song.title) {
			self.text_fields[TextField::Title].insert(str, spur, song_key);
		}

		if let Some(track_number) = &scanner_song.track_number {
			self.number_fields[NumberField::TrackNumber].insert(*track_number, song_key);
		}

		if let Some(year) = &scanner_song.year {
			self.number_fields[NumberField::Year].insert(*year, song_key);
		}
	}

	/// Freezes the accumulated indices into a queryable `Search`.
	pub fn build(self) -> Search {
		Search {
			text_fields: self.text_fields,
			number_fields: self.number_fields,
		}
	}
}
#[cfg(test)]
mod test {
	//! End-to-end tests for the search index: each test builds a tiny
	//! in-memory collection and checks which songs a query returns.

	use std::path::PathBuf;

	use super::*;
	use crate::app::index::dictionary;
	use collection::Collection;
	use storage::store_song;

	// Holds the three structures a query needs: the string dictionary, the
	// collection (for sorting results) and the search index itself.
	struct Context {
		dictionary: Dictionary,
		collection: Collection,
		search: Search,
	}

	impl Context {
		// Runs `query` and returns the virtual paths of the matching songs.
		pub fn search(&self, query: &str) -> Vec<PathBuf> {
			self.search
				.find_songs(&self.collection, &self.dictionary, query)
				.unwrap()
				.into_iter()
				.map(|s| s.virtual_path)
				.collect()
		}
	}

	// Builds a Context from in-memory songs, mirroring the real scan pipeline
	// (intern strings, feed the collection, feed the search index).
	fn setup_test(songs: Vec<scanner::Song>) -> Context {
		let mut dictionary_builder = dictionary::Builder::default();
		let mut collection_builder = collection::Builder::default();
		let mut search_builder = Builder::default();

		for song in songs {
			let storage_song = store_song(&mut dictionary_builder, &song).unwrap();
			collection_builder.add_song(&storage_song);
			search_builder.add_song(&song, &storage_song);
		}

		Context {
			collection: collection_builder.build(),
			search: search_builder.build(),
			dictionary: dictionary_builder.build(),
		}
	}

	// A bare query term matches any field containing it as a substring.
	#[test]
	fn can_find_fuzzy() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("seasons.mp3"),
				title: Some("Seasons".to_owned()),
				artists: vec!["Dragonforce".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("potd.mp3"),
				title: Some("Power of the Dragonflame".to_owned()),
				artists: vec!["Rhapsody".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("calcium.mp3"),
				title: Some("Calcium".to_owned()),
				artists: vec!["FSOL".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("agon");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
		assert!(songs.contains(&PathBuf::from("potd.mp3")));
	}

	// `field % value` restricts the substring match to one field.
	#[test]
	fn can_find_field_like() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("seasons.mp3"),
				title: Some("Seasons".to_owned()),
				artists: vec!["Dragonforce".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("potd.mp3"),
				title: Some("Power of the Dragonflame".to_owned()),
				artists: vec!["Rhapsody".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("artist % agon");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
	}

	// Both fuzzy and exact matches ignore letter case.
	#[test]
	fn text_is_case_insensitive() {
		let ctx = setup_test(vec![scanner::Song {
			virtual_path: PathBuf::from("seasons.mp3"),
			artists: vec!["Dragonforce".to_owned()],
			..Default::default()
		}]);

		let songs = ctx.search("dragonforce");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));

		let songs = ctx.search("artist = dragonforce");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
	}

	// `field = value` requires the whole field to match, not a substring.
	#[test]
	fn can_find_field_exact() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("seasons.mp3"),
				title: Some("Seasons".to_owned()),
				artists: vec!["Dragonforce".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("potd.mp3"),
				title: Some("Power of the Dragonflame".to_owned()),
				artists: vec!["Rhapsody".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("artist = Dragon");
		assert!(songs.is_empty());

		let songs = ctx.search("artist = Dragonforce");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("seasons.mp3")));
	}

	// Numeric fields support =, >, <, >= and <= operators.
	#[test]
	fn can_query_number_fields() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("1999.mp3"),
				year: Some(1999),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("2000.mp3"),
				year: Some(2000),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("2001.mp3"),
				year: Some(2001),
				..Default::default()
			},
		]);

		let songs = ctx.search("year=2000");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("2000.mp3")));

		let songs = ctx.search("year>2000");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("2001.mp3")));

		let songs = ctx.search("year<2000");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("1999.mp3")));

		let songs = ctx.search("year>=2000");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("2000.mp3")));
		assert!(songs.contains(&PathBuf::from("2001.mp3")));

		let songs = ctx.search("year<=2000");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("1999.mp3")));
		assert!(songs.contains(&PathBuf::from("2000.mp3")));
	}

	// A bare number matches both numeric fields and text fields containing it.
	#[test]
	fn fuzzy_numbers_query_all_fields() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("music.mp3"),
				year: Some(2000),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("fireworks 2000.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("calcium.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("2000");
		assert_eq!(songs.len(), 2);
		assert!(songs.contains(&PathBuf::from("music.mp3")));
		assert!(songs.contains(&PathBuf::from("fireworks 2000.mp3")));
	}

	// `&&` is the explicit AND operator; juxtaposition is an implicit AND.
	#[test]
	fn can_use_and_operator() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("whale.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("space.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("whales in space.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("space && whale");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("whales in space.mp3")));

		let songs = ctx.search("space whale");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("whales in space.mp3")));
	}

	// `||` unions the results of its two sides.
	#[test]
	fn can_use_or_operator() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("whale.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("space.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("whales in space.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("space || whale");
		assert_eq!(songs.len(), 3);
		assert!(songs.contains(&PathBuf::from("whale.mp3")));
		assert!(songs.contains(&PathBuf::from("space.mp3")));
		assert!(songs.contains(&PathBuf::from("whales in space.mp3")));
	}

	// `a !! b` keeps matches of `a` that do not match `b`.
	#[test]
	fn can_use_not_operator() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("whale.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("space.mp3"),
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("whales in space.mp3"),
				..Default::default()
			},
		]);

		let songs = ctx.search("whale !! space");
		assert_eq!(songs.len(), 1);
		assert!(songs.contains(&PathBuf::from("whale.mp3")));
	}

	// Results come back in collection order (artist/album/track), not in
	// index order.
	#[test]
	fn results_are_sorted() {
		let ctx = setup_test(vec![
			scanner::Song {
				virtual_path: PathBuf::from("accented.mp3"),
				artists: vec!["à la maison".to_owned()],
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("cry thunder.mp3"),
				artists: vec!["Dragonforce".to_owned()],
				album: Some("The Power Within".to_owned()),
				year: Some(2012),
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("revelations.mp3"),
				artists: vec!["Dragonforce".to_owned()],
				album: Some("Valley of the Damned".to_owned()),
				year: Some(2003),
				track_number: Some(7),
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("starfire.mp3"),
				artists: vec!["Dragonforce".to_owned()],
				album: Some("Valley of the Damned".to_owned()),
				year: Some(2003),
				track_number: Some(5),
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("eternal snow.mp3"),
				artists: vec!["Rhapsody".to_owned()],
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
			scanner::Song {
				virtual_path: PathBuf::from("alchemy.mp3"),
				artists: vec!["Avantasia".to_owned()],
				genres: vec!["Metal".to_owned()],
				..Default::default()
			},
		]);

		let songs = ctx.search("metal");
		assert_eq!(songs.len(), 6);
		assert_eq!(
			songs,
			vec![
				PathBuf::from("accented.mp3"),
				PathBuf::from("alchemy.mp3"),
				PathBuf::from("starfire.mp3"),
				PathBuf::from("revelations.mp3"),
				PathBuf::from("cry thunder.mp3"),
				PathBuf::from("eternal snow.mp3"),
			]
		);
	}

	// "love" shares bigrams with "lorry bovine vehicle" but is not a
	// substring of it; the full-substring filter must reject it.
	#[test]
	fn avoids_bigram_false_positives() {
		let ctx = setup_test(vec![scanner::Song {
			virtual_path: PathBuf::from("lorry bovine vehicle.mp3"),
			..Default::default()
		}]);

		let songs = ctx.search("love");
		assert!(songs.is_empty());
	}

	// Query components shorter than a bigram cannot be indexed and are
	// ignored rather than filtering everything out.
	#[test]
	fn ignores_single_letter_components() {
		let ctx = setup_test(vec![scanner::Song {
			virtual_path: PathBuf::from("seasons.mp3"),
			..Default::default()
		}]);

		let songs = ctx.search("seas u");
		assert_eq!(songs.len(), 1);

		let songs = ctx.search("seas 2");
		assert_eq!(songs.len(), 1);

		let songs = ctx.search("seas || u");
		assert_eq!(songs.len(), 1);

		let songs = ctx.search("seas || 2");
		assert_eq!(songs.len(), 1);
	}
}

256
src/app/index/storage.rs Normal file
View file

@ -0,0 +1,256 @@
use std::{
collections::{HashMap, HashSet},
path::{Path, PathBuf},
};
use lasso2::Spur;
use log::error;
use serde::{Deserialize, Serialize};
use tinyvec::TinyVec;
use crate::app::scanner;
use crate::app::index::dictionary::{self, Dictionary};
/// A browsable filesystem entry, identified by its interned virtual path.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum File {
	Directory(PathKey),
	Song(PathKey),
}

/// Aggregated data about one music genre across the collection.
#[derive(Clone, Serialize, Deserialize)]
pub struct Genre {
	pub name: Spur,
	pub albums: HashSet<AlbumKey>,
	pub artists: HashSet<ArtistKey>,
	// Genres that co-occur with this one on songs, with a co-occurrence count.
	pub related_genres: HashMap<GenreKey, u32>,
	pub songs: Vec<SongKey>,
}

/// Aggregated data about one artist, broken down by the role they play on
/// each album (performer, composer, lyricist, …).
#[derive(Clone, Serialize, Deserialize)]
pub struct Artist {
	pub name: Spur,
	pub all_albums: HashSet<AlbumKey>,
	pub albums_as_performer: HashSet<AlbumKey>,
	pub albums_as_additional_performer: HashSet<AlbumKey>,
	pub albums_as_composer: HashSet<AlbumKey>,
	pub albums_as_lyricist: HashSet<AlbumKey>,
	pub num_songs_by_genre: HashMap<Spur, u32>,
	pub num_songs: u32,
}

/// Aggregated data about one album.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Album {
	pub name: Spur,
	pub artwork: Option<PathKey>,
	pub artists: TinyVec<[ArtistKey; 1]>,
	pub year: Option<i64>,
	pub date_added: i64,
	pub songs: HashSet<SongKey>,
}

/// A song with all of its strings interned (`Spur`). The TinyVec inline
/// capacities reflect the common cardinality of each field (e.g. most songs
/// have exactly one artist, and no lyricist/composer/label tags).
#[derive(Clone, Serialize, Deserialize)]
pub struct Song {
	pub real_path: PathKey,
	pub virtual_path: PathKey,
	pub track_number: Option<i64>,
	pub disc_number: Option<i64>,
	pub title: Option<Spur>,
	pub artists: TinyVec<[ArtistKey; 1]>,
	pub album_artists: TinyVec<[ArtistKey; 1]>,
	pub year: Option<i64>,
	pub album: Option<Spur>,
	pub artwork: Option<PathKey>,
	pub duration: Option<i64>,
	pub lyricists: TinyVec<[ArtistKey; 0]>,
	pub composers: TinyVec<[ArtistKey; 0]>,
	pub genres: TinyVec<[Spur; 1]>,
	pub labels: TinyVec<[Spur; 0]>,
	pub date_added: i64,
}

// Newtype keys over interned strings, so the different entity kinds cannot
// be mixed up at compile time.

/// Interned filesystem path.
#[derive(
	Copy, Clone, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize,
)]
pub struct PathKey(pub Spur);

/// Interned (canonicalized) genre name.
#[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct GenreKey(pub Spur);

/// Interned (canonicalized) artist name.
#[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct ArtistKey(pub Spur);

/// An album is identified by its name plus its main artists, so that two
/// different artists can each have an album with the same name.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AlbumKey {
	pub artists: TinyVec<[ArtistKey; 4]>,
	pub name: Spur,
}

/// A song is identified by its virtual path.
#[derive(Copy, Clone, Debug, Default, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct SongKey {
	pub virtual_path: PathKey,
}

// SongKey wraps a single integer-like Spur, so it can opt into the
// pass-through hasher used by IntSet/IntMap.
impl nohash_hasher::IsEnabled for SongKey {}
impl Song {
	/// Returns the key of the album this song belongs to, if any.
	///
	/// Album identity is the album name plus its main artists: the album
	/// artists when present, otherwise the track artists. Songs with no album
	/// name, or with no artists at all, belong to no album.
	pub fn album_key(&self) -> Option<AlbumKey> {
		let main_artists = if self.album_artists.is_empty() {
			&self.artists
		} else {
			&self.album_artists
		};

		if main_artists.is_empty() {
			return None;
		}

		// `self.album` is a Copy Option<Spur>, so `map` replaces the manual
		// None/Some match of the previous implementation.
		self.album.map(|name| AlbumKey {
			artists: main_artists.iter().cloned().collect(),
			name,
		})
	}
}
pub fn store_song(
dictionary_builder: &mut dictionary::Builder,
song: &scanner::Song,
) -> Option<Song> {
let Some(real_path) = (&song.real_path).get_or_intern(dictionary_builder) else {
return None;
};
let Some(virtual_path) = (&song.virtual_path).get_or_intern(dictionary_builder) else {
return None;
};
let artwork = match &song.artwork {
Some(a) => match a.get_or_intern(dictionary_builder) {
Some(a) => Some(a),
None => return None,
},
None => None,
};
let mut canonicalize = |s: &String| dictionary_builder.get_or_intern_canon(s);
Some(Song {
real_path,
virtual_path,
track_number: song.track_number,
disc_number: song.disc_number,
title: song.title.as_ref().and_then(&mut canonicalize),
artists: song
.artists
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
album_artists: song
.album_artists
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
year: song.year,
album: song.album.as_ref().and_then(&mut canonicalize),
artwork: artwork,
duration: song.duration,
lyricists: song
.lyricists
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
composers: song
.composers
.iter()
.filter_map(&mut canonicalize)
.map(|k| ArtistKey(k))
.collect(),
genres: song.genres.iter().filter_map(&mut canonicalize).collect(),
labels: song.labels.iter().filter_map(&mut canonicalize).collect(),
date_added: song.date_added,
})
}
/// Inverse of `store_song`: resolves every interned key of a stored song back
/// into owned strings/paths for use outside the index.
pub fn fetch_song(dictionary: &Dictionary, song: &Song) -> super::Song {
	super::Song {
		real_path: PathBuf::from(dictionary.resolve(&song.real_path.0)),
		virtual_path: PathBuf::from(dictionary.resolve(&song.virtual_path.0)),
		track_number: song.track_number,
		disc_number: song.disc_number,
		title: song.title.map(|s| dictionary.resolve(&s).to_string()),
		artists: song
			.artists
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		album_artists: song
			.album_artists
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		year: song.year,
		album: song.album.map(|s| dictionary.resolve(&s).to_string()),
		artwork: song
			.artwork
			.map(|a| PathBuf::from(dictionary.resolve(&a.0))),
		duration: song.duration,
		lyricists: song
			.lyricists
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		composers: song
			.composers
			.iter()
			.map(|k| dictionary.resolve(&k.0).to_string())
			.collect(),
		genres: song
			.genres
			.iter()
			.map(|s| dictionary.resolve(&s).to_string())
			.collect(),
		labels: song
			.labels
			.iter()
			.map(|s| dictionary.resolve(&s).to_string())
			.collect(),
		date_added: song.date_added,
	}
}
/// Conversion between filesystem paths and interned `PathKey`s.
/// Both methods return `None` for paths that are not valid UTF-8.
pub trait InternPath {
	// Interns the path, adding it to the dictionary if needed.
	fn get_or_intern(self, dictionary: &mut dictionary::Builder) -> Option<PathKey>;
	// Looks up an already-interned path without mutating the dictionary.
	fn get(self, dictionary: &Dictionary) -> Option<PathKey>;
}
impl<P: AsRef<Path>> InternPath for P {
fn get_or_intern(self, dictionary: &mut dictionary::Builder) -> Option<PathKey> {
let id = self
.as_ref()
.as_os_str()
.to_str()
.map(|s| dictionary.get_or_intern(s))
.map(PathKey);
if id.is_none() {
error!("Unsupported path: `{}`", self.as_ref().to_string_lossy());
}
id
}
fn get(self, dictionary: &Dictionary) -> Option<PathKey> {
let id = self
.as_ref()
.as_os_str()
.to_str()
.and_then(|s| dictionary.get(s))
.map(PathKey);
if id.is_none() {
error!("Unsupported path: `{}`", self.as_ref().to_string_lossy());
}
id
}
}

305
src/app/legacy.rs Normal file
View file

@ -0,0 +1,305 @@
use std::{
collections::HashMap,
ops::Deref,
path::{Path, PathBuf},
str::FromStr,
};
use regex::Regex;
use rusqlite::Connection;
use crate::app::{config, index, scanner, Error};
/// Reads the authentication secret out of a pre-migration Polaris database.
pub fn read_legacy_auth_secret(db_file_path: &PathBuf) -> Result<[u8; 32], Error> {
	let connection = Connection::open(db_file_path)?;
	let query = "SELECT auth_secret FROM misc_settings";
	let auth_secret = connection.query_row(query, [], |row| row.get::<_, [u8; 32]>(0))?;
	Ok(auth_secret)
}
/// Assembles a modern `Config` from a pre-migration database: album art
/// pattern, mount directories and users. The legacy DDNS settings are not
/// carried over.
pub fn read_legacy_config(
	db_file_path: &PathBuf,
) -> Result<Option<config::storage::Config>, Error> {
	let connection = Connection::open(db_file_path)?;
	let album_art_pattern: String = connection.query_row(
		"SELECT index_album_art_pattern FROM misc_settings",
		[],
		|row| row.get(0),
	)?;

	let mount_dirs = read_mount_dirs(db_file_path)?;
	let users = read_users(db_file_path)?;

	Ok(Some(config::storage::Config {
		album_art_pattern: Some(album_art_pattern),
		mount_dirs,
		ddns_update_url: None,
		// User ids are only needed to resolve playlist ownership; the modern
		// config stores users by name.
		users: users.into_values().collect(),
	}))
}
fn read_mount_dirs(db_file_path: &PathBuf) -> Result<Vec<config::storage::MountDir>, Error> {
let connection = Connection::open(db_file_path)?;
let mut mount_dirs_statement = connection.prepare("SELECT source, name FROM mount_points")?;
let mount_dirs_rows = mount_dirs_statement.query_and_then::<_, Error, _, _>([], |row| {
let source_string = row.get::<_, String>(0)?;
let source = PathBuf::from_str(&source_string)
.map_err(|_| Error::InvalidDirectory(source_string))?;
Ok(config::storage::MountDir {
source,
name: row.get::<_, String>(1)?,
})
})?;
let mut mount_dirs = vec![];
for mount_dir_result in mount_dirs_rows {
mount_dirs.push(mount_dir_result?);
}
Ok(mount_dirs)
}
/// Reads the user accounts from a pre-migration database, keyed by their
/// legacy numeric id (playlist rows reference users by id).
fn read_users(db_file_path: &PathBuf) -> Result<HashMap<u32, config::storage::User>, Error> {
	let connection = Connection::open(db_file_path)?;
	let mut users_statement =
		connection.prepare("SELECT id, name, password_hash, admin FROM users")?;
	let users_rows = users_statement.query_map([], |row| {
		Ok((
			row.get::<_, u32>(0)?,
			config::storage::User {
				name: row.get(1)?,
				admin: row.get(3)?,
				// Legacy databases only store the hash; there is no plaintext
				// initial password to migrate.
				initial_password: None,
				hashed_password: row.get(2)?,
			},
		))
	})?;

	// Collecting (id, user) pairs into Result<HashMap, _> short-circuits on
	// the first row error, replacing the manual insertion loop.
	let users = users_rows.collect::<Result<HashMap<_, _>, _>>()?;
	Ok(users)
}
/// Normalizes both `\` and `/` to the platform's path separator.
///
/// Legacy databases may contain paths written on either platform. A plain
/// character replacement does the job; no need to compile a regex on every
/// call like the previous implementation did.
fn sanitize_path(source: &PathBuf) -> PathBuf {
	let path_string = source.to_string_lossy();
	let normalized = path_string.replace(['\\', '/'], std::path::MAIN_SEPARATOR_STR);
	PathBuf::from(normalized)
}
/// Maps an on-disk path from the legacy database to its virtual path under
/// one of the configured mount directories.
///
/// # Errors
/// Returns `Error::CouldNotMapToVirtualPath` when the path is not under any
/// mount directory.
fn virtualize_path(
	real_path: &PathBuf,
	// `&[MountDir]` instead of `&Vec<MountDir>`: callers passing `&vec` still
	// work via deref coercion.
	mount_dirs: &[config::storage::MountDir],
) -> Result<PathBuf, Error> {
	let sanitized = sanitize_path(real_path); // Paths in test database use `/` separators, but need `\` when running tests on Windows
	for mount_dir in mount_dirs {
		if let Ok(tail) = sanitized.strip_prefix(&mount_dir.source) {
			return Ok(Path::new(&mount_dir.name).join(tail));
		}
	}
	Err(Error::CouldNotMapToVirtualPath(real_path.clone()))
}
/// Extracts all playlists from a pre-migration database.
///
/// Returns `(playlist_name, owner_name, songs)` tuples. A full scan is run
/// first so that playlist entries can be resolved against the current index;
/// songs whose paths cannot be mapped or found are silently skipped.
pub async fn read_legacy_playlists(
	db_file_path: &PathBuf,
	index_manager: index::Manager,
	scanner: scanner::Scanner,
) -> Result<Vec<(String, String, Vec<index::Song>)>, Error> {
	// The index must be populated before we can look legacy songs up.
	scanner.run_scan().await?;

	let users = read_users(db_file_path)?;
	let mount_dirs = read_mount_dirs(db_file_path)?;
	let connection = Connection::open(db_file_path)?;

	// Map legacy playlist id -> (owner, playlist name).
	let mut playlists_statement = connection.prepare("SELECT id, owner, name FROM playlists")?;
	let playlists_rows =
		playlists_statement.query_map([], |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)))?;
	let mut playlists = HashMap::new();
	for playlists_row in playlists_rows {
		let (id, owner, name): (u32, u32, String) = playlists_row?;
		playlists.insert(id, (users.get(&owner).ok_or(Error::UserNotFound)?, name));
	}

	let mut playlists_by_user: HashMap<String, HashMap<String, Vec<index::Song>>> = HashMap::new();

	// Walk playlist entries in playback order and resolve each one against
	// the freshly-built index.
	let mut songs_statement =
		connection.prepare("SELECT playlist, path FROM playlist_songs ORDER BY ordering")?;
	let mut songs_rows = songs_statement.query([])?;
	while let Some(row) = songs_rows.next()? {
		let playlist = playlists.get(&row.get(0)?).ok_or(Error::PlaylistNotFound)?;
		let user = playlist.0.name.clone();
		let name = playlist.1.clone();
		let real_path = PathBuf::from(row.get::<_, String>(1)?);
		// Skip songs that no longer map to a mount directory.
		let Ok(virtual_path) = virtualize_path(&real_path, &mount_dirs) else {
			continue;
		};
		// Skip songs the index no longer knows about.
		let Ok(song) = index_manager
			.get_songs(vec![virtual_path])
			.await
			.pop()
			.unwrap()
		else {
			continue;
		};
		playlists_by_user
			.entry(user)
			.or_default()
			.entry(name)
			.or_default()
			.push(song);
	}

	// Flatten the nested user -> name -> songs map into result tuples.
	let mut results = vec![];
	for (user, playlists) in playlists_by_user {
		for (playlist_name, songs) in playlists {
			results.push((playlist_name.clone(), user.clone(), songs));
		}
	}

	Ok(results)
}
pub async fn delete_legacy_db(db_file_path: &PathBuf) -> Result<(), Error> {
tokio::fs::remove_file(db_file_path)
.await
.map_err(|e| Error::Io(db_file_path.clone(), e))?;
Ok(())
}
#[cfg(test)]
mod test {
	//! Migration tests against checked-in legacy database fixtures:
	//! `legacy_db_blank.sqlite` (fresh install) and
	//! `legacy_db_populated.sqlite` (users, mounts and a playlist).

	use std::path::PathBuf;

	use super::*;
	use crate::{
		app::{config, test},
		test_name,
	};

	// The auth secret must round-trip byte for byte so existing sessions
	// survive the migration.
	#[test]
	fn can_read_auth_secret() {
		let secret =
			read_legacy_auth_secret(&PathBuf::from_iter(["test-data", "legacy_db_blank.sqlite"]))
				.unwrap();
		assert_eq!(
			secret,
			[
				0x8B as u8, 0x88, 0x50, 0x17, 0x20, 0x09, 0x7E, 0x60, 0x31, 0x80, 0xCE, 0xE3, 0xF0,
				0x5A, 0x00, 0xBC, 0x3A, 0xF4, 0xDC, 0xFD, 0x2E, 0xB7, 0x5D, 0x33, 0x5D, 0x81, 0x2F,
				0x9A, 0xB4, 0x3A, 0x27, 0x2D
			]
		);
	}

	// A blank database yields the default art pattern and no users/mounts.
	#[test]
	fn can_read_blank_config() {
		let actual =
			read_legacy_config(&PathBuf::from_iter(["test-data", "legacy_db_blank.sqlite"]))
				.unwrap()
				.unwrap();

		let expected = config::storage::Config {
			album_art_pattern: Some("Folder.(jpeg|jpg|png)".to_owned()),
			mount_dirs: vec![],
			ddns_update_url: None,
			users: vec![],
		};

		assert_eq!(actual, expected);
	}

	// Users keep their password hash (never the plaintext) across migration.
	#[test]
	fn can_read_populated_config() {
		let actual = read_legacy_config(&PathBuf::from_iter([
			"test-data",
			"legacy_db_populated.sqlite",
		]))
		.unwrap()
		.unwrap();

		let expected = config::storage::Config {
			album_art_pattern: Some("Folder.(jpeg|jpg|png)".to_owned()),
			mount_dirs: vec![config::storage::MountDir {
				source: PathBuf::from_iter(["test-data", "small-collection"]),
				name: "root".to_owned(),
			}],
			ddns_update_url: None,
			users: vec![config::storage::User {
				name: "example_user".to_owned(),
				admin: Some(true),
				initial_password: None,
				hashed_password: Some("$pbkdf2-sha256$i=10000,l=32$ADvDnwBv3kLUtjTJEwGcFA$oK43ICpNt2rbH21diMo6cSXL62qqLWOM7qs8f0s/9Oo".to_owned()),
			}],
		};

		assert_eq!(actual, expected);
	}

	// No playlists in a blank database.
	#[tokio::test]
	async fn can_read_blank_playlists() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;

		let actual = read_legacy_playlists(
			&PathBuf::from_iter(["test-data", "legacy_db_blank.sqlite"]),
			ctx.index_manager,
			ctx.scanner,
		)
		.await
		.unwrap();

		let expected = vec![];
		assert_eq!(actual, expected);
	}

	// A populated database yields the playlist with its songs resolved
	// against the scanned collection, in playback order.
	#[tokio::test]
	async fn can_read_populated_playlists() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		let db_file_path = PathBuf::from_iter(["test-data", "legacy_db_populated.sqlite"]);

		// Apply the migrated config first so the mount dirs exist for the scan.
		let config = read_legacy_config(&db_file_path).unwrap().unwrap();
		ctx.config_manager.apply_config(config).await.unwrap();

		let actual = read_legacy_playlists(
			&db_file_path,
			ctx.index_manager.clone(),
			ctx.scanner.clone(),
		)
		.await
		.unwrap();

		#[rustfmt::skip]
		let song_paths = vec![
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "01 - Above The Water.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "02 - Candlelight.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "03 - Three Gates.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "04 - Beyond The Door.mp3"]),
			PathBuf::from_iter(["root", "Khemmis", "Hunted", "05 - Hunted.mp3"]),
		];

		let songs: Vec<index::Song> = ctx
			.index_manager
			.get_songs(song_paths)
			.await
			.into_iter()
			.map(|s| s.unwrap())
			.collect();

		let expected = vec![(
			"Example Playlist".to_owned(),
			"example_user".to_owned(),
			songs,
		)];

		assert_eq!(actual, expected);
	}
}

40
src/app/ndb.rs Normal file
View file

@ -0,0 +1,40 @@
use std::{
ops::Deref,
path::Path,
sync::{Arc, LazyLock},
};
use native_db::{Database, Models};
use crate::app::{playlist, Error};
// Registry of every native_db model this application persists. Models must
// be defined before the database is opened.
static MODELS: LazyLock<Models> = LazyLock::new(|| {
	let mut models = Models::new();
	models.define::<playlist::v1::PlaylistModel>().unwrap();
	models
});

/// Cheaply-cloneable handle to the shared native_db database.
#[derive(Clone)]
pub struct Manager {
	database: Arc<Database<'static>>,
}
impl Manager {
pub fn new(directory: &Path) -> Result<Self, Error> {
std::fs::create_dir_all(directory).map_err(|e| Error::Io(directory.to_owned(), e))?;
let path = directory.join("polaris.ndb");
let database = native_db::Builder::new()
.create(&MODELS, path)
.map_err(|e| Error::NativeDatabaseCreationError(e))?;
let database = Arc::new(database);
Ok(Self { database })
}
}
impl Deref for Manager {
	type Target = Database<'static>;

	/// Lets callers use the manager directly as a database handle
	/// (transactions, scans, …).
	fn deref(&self) -> &Self::Target {
		&self.database
	}
}

179
src/app/peaks.rs Normal file
View file

@ -0,0 +1,179 @@
use std::{
hash::{DefaultHasher, Hash, Hasher},
path::{Path, PathBuf},
};
use serde::{Deserialize, Serialize};
use symphonia::core::{
audio::SampleBuffer,
codecs::{DecoderOptions, CODEC_TYPE_NULL},
formats::FormatOptions,
io::{MediaSourceStream, MediaSourceStreamOptions},
meta::MetadataOptions,
probe::Hint,
};
use tokio::{io::AsyncWriteExt, task::spawn_blocking};
use crate::app::Error;
/// Waveform preview of an audio file: interleaved (min, max) 8-bit sample
/// pairs, one pair per fixed slice of audio time.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct Peaks {
	pub interleaved: Vec<u8>,
}

/// Computes waveform previews and caches them on disk.
#[derive(Clone)]
pub struct Manager {
	// Directory holding the `<hash>.peaks` cache files.
	peaks_dir_path: PathBuf,
}
impl Manager {
	/// Creates a manager that caches peaks files under `peaks_dir_path`.
	pub fn new(peaks_dir_path: PathBuf) -> Self {
		Self { peaks_dir_path }
	}

	/// Returns the waveform preview for `audio_path`, serving from the disk
	/// cache when possible and decoding the file (then caching) otherwise.
	/// Any cache read error silently falls back to recomputation.
	pub async fn get_peaks(&self, audio_path: &Path) -> Result<Peaks, Error> {
		match self.read_from_cache(audio_path).await {
			Ok(Some(peaks)) => Ok(peaks),
			_ => self.read_from_source(audio_path).await,
		}
	}

	// Cache file location: `<peaks_dir>/<hash-of-audio-path>.peaks`.
	fn get_peaks_path(&self, audio_path: &Path) -> PathBuf {
		let hash = Manager::hash(audio_path);
		let mut peaks_path = self.peaks_dir_path.clone();
		peaks_path.push(format!("{}.peaks", hash));
		peaks_path
	}

	// Returns Ok(None) on cache miss; errors only for read/decode failures.
	async fn read_from_cache(&self, audio_path: &Path) -> Result<Option<Peaks>, Error> {
		let peaks_path = self.get_peaks_path(audio_path);
		if peaks_path.exists() {
			let serialized = tokio::fs::read(&peaks_path)
				.await
				.map_err(|e| Error::Io(peaks_path.clone(), e))?;
			let peaks =
				bitcode::deserialize::<Peaks>(&serialized).map_err(Error::PeaksDeserialization)?;
			Ok(Some(peaks))
		} else {
			Ok(None)
		}
	}

	// Decodes the audio file (on a blocking thread, since symphonia is
	// synchronous and CPU-bound) and writes the result to the cache.
	async fn read_from_source(&self, audio_path: &Path) -> Result<Peaks, Error> {
		let peaks = spawn_blocking({
			let audio_path = audio_path.to_owned();
			move || compute_peaks(&audio_path)
		})
		.await??;

		let serialized = bitcode::serialize(&peaks).map_err(Error::PeaksSerialization)?;

		tokio::fs::create_dir_all(&self.peaks_dir_path)
			.await
			.map_err(|e| Error::Io(self.peaks_dir_path.clone(), e))?;

		let path = self.get_peaks_path(audio_path);
		let mut out_file = tokio::fs::File::create(&path)
			.await
			.map_err(|e| Error::Io(path.clone(), e))?;
		out_file
			.write_all(&serialized)
			.await
			.map_err(|e| Error::Io(path.clone(), e))?;

		Ok(peaks)
	}

	// Stable-per-run hash used to name cache files.
	// NOTE(review): DefaultHasher's output is not guaranteed stable across
	// Rust releases, so cache files may be orphaned after a toolchain
	// upgrade — confirm this is acceptable.
	fn hash(path: &Path) -> u64 {
		let mut hasher = DefaultHasher::new();
		path.hash(&mut hasher);
		hasher.finish()
	}
}
fn compute_peaks(audio_path: &Path) -> Result<Peaks, Error> {
let peaks_per_minute = 4000;
let file =
std::fs::File::open(&audio_path).or_else(|e| Err(Error::Io(audio_path.to_owned(), e)))?;
let media_source = MediaSourceStream::new(Box::new(file), MediaSourceStreamOptions::default());
let mut peaks = Peaks::default();
peaks.interleaved.reserve(5 * peaks_per_minute);
let mut format = symphonia::default::get_probe()
.format(
&Hint::new(),
media_source,
&FormatOptions::default(),
&MetadataOptions::default(),
)
.map_err(Error::MediaProbeError)?
.format;
let track = format
.tracks()
.iter()
.find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
.ok_or_else(|| Error::MediaEmpty(audio_path.to_owned()))?;
let track_id = track.id;
let mut decoder = symphonia::default::get_codecs()
.make(&track.codec_params, &DecoderOptions::default())
.map_err(Error::MediaDecoderError)?;
let (mut min, mut max) = (u8::MAX, u8::MIN);
let mut num_ingested = 0;
loop {
let packet = match format.next_packet() {
Ok(packet) => packet,
Err(symphonia::core::errors::Error::IoError(e))
if e.kind() == std::io::ErrorKind::UnexpectedEof =>
{
break;
}
Err(e) => return Err(Error::MediaPacketError(e)),
};
if packet.track_id() != track_id {
continue;
}
let decoded = match decoder.decode(&packet) {
Ok(d) => d,
Err(_) => continue,
};
let num_channels = decoded.spec().channels.count();
let sample_rate = decoded.spec().rate;
let num_samples_per_peak =
((sample_rate as f32) * 60.0 / (peaks_per_minute as f32)).round() as usize;
let mut buffer = SampleBuffer::<u8>::new(decoded.capacity() as u64, *decoded.spec());
buffer.copy_interleaved_ref(decoded);
for samples in buffer.samples().chunks_exact(num_channels) {
// Merge channels into mono signal
let mut mono: u32 = 0;
for sample in samples {
mono += *sample as u32;
}
mono /= samples.len() as u32;
min = u8::min(min, mono as u8);
max = u8::max(max, mono as u8);
num_ingested += 1;
if num_ingested >= num_samples_per_peak {
peaks.interleaved.push(min);
peaks.interleaved.push(max);
(min, max) = (u8::MAX, u8::MIN);
num_ingested = 0;
}
}
}
Ok(peaks)
}

374
src/app/playlist.rs Normal file
View file

@ -0,0 +1,374 @@
use core::clone::Clone;
use std::collections::{BTreeMap, HashMap};
use std::path::PathBuf;
use std::time::Duration;
use icu_collator::{Collator, CollatorOptions, Strength};
use native_db::*;
use native_model::{native_model, Model};
use serde::{Deserialize, Serialize};
use tokio::task::spawn_blocking;
use crate::app::{index, ndb, Error};
/// Cheaply-cloneable playlist service backed by the shared native_db handle.
#[derive(Clone)]
pub struct Manager {
	db: ndb::Manager,
}

/// Summary of a playlist, without its song list.
#[derive(Debug)]
pub struct PlaylistHeader {
	pub name: String,
	pub duration: Duration,
	pub num_songs_by_genre: HashMap<String, u32>,
}

/// A playlist: its header plus the virtual paths of its songs, in order.
#[derive(Debug)]
pub struct Playlist {
	pub header: PlaylistHeader,
	pub songs: Vec<PathBuf>,
}

// Aliases pinning the rest of the module to the current schema version.
pub type PlaylistModel = v1::PlaylistModel;
type PlaylistModelKey = v1::PlaylistModelKey;

/// Version 1 of the persisted playlist schema. New versions get their own
/// module so native_model can migrate between them.
pub mod v1 {
	use super::*;

	// Stored playlist row. Primary key is (owner, name), so playlist names
	// are unique per user; `owner` is also a secondary key for listing.
	#[derive(Debug, Default, Serialize, Deserialize)]
	#[native_model(id = 1, version = 1)]
	#[native_db(primary_key(custom_id -> (&str, &str)))]
	pub struct PlaylistModel {
		#[secondary_key]
		pub owner: String,
		pub name: String,
		pub duration: Duration,
		pub num_songs_by_genre: BTreeMap<String, u32>,
		pub virtual_paths: Vec<PathBuf>,
	}

	impl PlaylistModel {
		// Composite primary key: (owner, name).
		fn custom_id(&self) -> (&str, &str) {
			(&self.owner, &self.name)
		}
	}
}
impl From<PlaylistModel> for PlaylistHeader {
fn from(p: PlaylistModel) -> Self {
Self {
name: p.name,
duration: p.duration,
num_songs_by_genre: p.num_songs_by_genre.into_iter().collect(),
}
}
}
impl From<PlaylistModel> for Playlist {
	/// Converts a stored model into a full playlist, reusing the model's
	/// song-list allocation instead of copying it.
	fn from(mut p: PlaylistModel) -> Self {
		// `std::mem::take` moves the Vec out wholesale; the previous
		// `drain(0..).collect()` walked every element to rebuild an identical Vec.
		let songs = std::mem::take(&mut p.virtual_paths);
		Self {
			songs,
			header: p.into(),
		}
	}
}
impl Manager {
pub fn new(db: ndb::Manager) -> Self {
Self { db }
}
pub async fn list_playlists(&self, owner: &str) -> Result<Vec<PlaylistHeader>, Error> {
spawn_blocking({
let manager = self.clone();
let owner = owner.to_owned();
move || {
let transaction = manager.db.r_transaction()?;
let mut playlists = transaction
.scan()
.secondary::<PlaylistModel>(PlaylistModelKey::owner)?
.range(owner.as_str()..=owner.as_str())?
.filter_map(|p| p.ok())
.map(PlaylistHeader::from)
.collect::<Vec<_>>();
let collator_options = {
let mut o = CollatorOptions::new();
o.strength = Some(Strength::Secondary);
o
};
let collator = Collator::try_new(&Default::default(), collator_options).unwrap();
playlists.sort_by(|a, b| collator.compare(&a.name, &b.name));
Ok(playlists)
}
})
.await?
}
pub async fn save_playlist(
&self,
name: &str,
owner: &str,
songs: Vec<index::Song>,
) -> Result<(), Error> {
spawn_blocking({
let manager = self.clone();
let owner = owner.to_owned();
let name = name.to_owned();
move || {
let transaction = manager.db.rw_transaction()?;
let duration = songs
.iter()
.filter_map(|s| s.duration.map(|d| d as u64))
.sum();
let mut num_songs_by_genre = BTreeMap::<String, u32>::new();
for song in &songs {
for genre in &song.genres {
*num_songs_by_genre.entry(genre.clone()).or_default() += 1;
}
}
let virtual_paths = songs.into_iter().map(|s| s.virtual_path).collect();
transaction.upsert::<PlaylistModel>(PlaylistModel {
owner: owner.to_owned(),
name: name.to_owned(),
duration: Duration::from_secs(duration),
num_songs_by_genre,
virtual_paths,
})?;
transaction.commit()?;
Ok(())
}
})
.await?
}
pub async fn read_playlist(&self, name: &str, owner: &str) -> Result<Playlist, Error> {
spawn_blocking({
let manager = self.clone();
let owner = owner.to_owned();
let name = name.to_owned();
move || {
let transaction = manager.db.r_transaction()?;
match transaction.get().primary::<PlaylistModel>((owner, name)) {
Ok(Some(p)) => Ok(Playlist::from(p)),
Ok(None) => Err(Error::PlaylistNotFound),
Err(e) => Err(Error::NativeDatabase(e)),
}
}
})
.await?
}
pub async fn delete_playlist(&self, name: &str, owner: &str) -> Result<(), Error> {
spawn_blocking({
let manager = self.clone();
let owner = owner.to_owned();
let name = name.to_owned();
move || {
let transaction = manager.db.rw_transaction()?;
let playlist = match transaction
.get()
.primary::<PlaylistModel>((owner.as_str(), name.as_str()))
{
Ok(Some(p)) => Ok(p),
Ok(None) => Err(Error::PlaylistNotFound),
Err(e) => Err(Error::NativeDatabase(e)),
}?;
transaction.remove::<PlaylistModel>(playlist)?;
transaction.commit()?;
Ok(())
}
})
.await?
}
}
#[cfg(test)]
mod test {
	use std::path::PathBuf;

	use crate::app::index;
	use crate::app::test::{self, Context};
	use crate::test_name;

	const TEST_USER: &str = "test_user";
	const TEST_PASSWORD: &str = "password";
	const TEST_PLAYLIST_NAME: &str = "Chill & Grill";
	const TEST_MOUNT_NAME: &str = "root";

	// Returns every song in the scanned test collection, asserting on the
	// expected song count to catch changes to the test data.
	async fn list_all_songs(ctx: &Context) -> Vec<index::Song> {
		let paths = ctx
			.index_manager
			.flatten(PathBuf::from(TEST_MOUNT_NAME))
			.await
			.unwrap()
			.into_iter()
			.collect::<Vec<_>>();
		let songs = ctx
			.index_manager
			.get_songs(paths)
			.await
			.into_iter()
			.map(|s| s.unwrap())
			.collect::<Vec<_>>();
		assert_eq!(songs.len(), 13);
		songs
	}

	#[tokio::test]
	async fn save_playlist_golden_path() {
		let ctx = test::ContextBuilder::new(test_name!())
			.user(TEST_USER, TEST_PASSWORD, false)
			.build()
			.await;

		// An empty song list is a valid playlist.
		ctx.playlist_manager
			.save_playlist(TEST_PLAYLIST_NAME, TEST_USER, Vec::new())
			.await
			.unwrap();

		let found_playlists = ctx
			.playlist_manager
			.list_playlists(TEST_USER)
			.await
			.unwrap();
		assert_eq!(found_playlists.len(), 1);
		assert_eq!(found_playlists[0].name.as_str(), TEST_PLAYLIST_NAME);
	}

	#[tokio::test]
	async fn save_playlist_is_idempotent() {
		let ctx = test::ContextBuilder::new(test_name!())
			.user(TEST_USER, TEST_PASSWORD, false)
			.mount(TEST_MOUNT_NAME, "test-data/small-collection")
			.build()
			.await;

		ctx.scanner.run_scan().await.unwrap();

		let songs = list_all_songs(&ctx).await;

		// Saving the same playlist twice must not duplicate its songs.
		ctx.playlist_manager
			.save_playlist(TEST_PLAYLIST_NAME, TEST_USER, songs.clone())
			.await
			.unwrap();

		ctx.playlist_manager
			.save_playlist(TEST_PLAYLIST_NAME, TEST_USER, songs.clone())
			.await
			.unwrap();

		let playlist = ctx
			.playlist_manager
			.read_playlist(TEST_PLAYLIST_NAME, TEST_USER)
			.await
			.unwrap();
		assert_eq!(playlist.songs.len(), 13);
	}

	#[tokio::test]
	async fn delete_playlist_golden_path() {
		let ctx = test::ContextBuilder::new(test_name!())
			.user(TEST_USER, TEST_PASSWORD, false)
			.mount(TEST_MOUNT_NAME, "test-data/small-collection")
			.build()
			.await;

		ctx.scanner.run_scan().await.unwrap();

		let songs = list_all_songs(&ctx).await;

		ctx.playlist_manager
			.save_playlist(TEST_PLAYLIST_NAME, TEST_USER, songs)
			.await
			.unwrap();

		ctx.playlist_manager
			.delete_playlist(TEST_PLAYLIST_NAME, TEST_USER)
			.await
			.unwrap();

		let found_playlists = ctx
			.playlist_manager
			.list_playlists(TEST_USER)
			.await
			.unwrap();
		assert_eq!(found_playlists.len(), 0);
	}

	#[tokio::test]
	async fn read_playlist_golden_path() {
		let ctx = test::ContextBuilder::new(test_name!())
			.user(TEST_USER, TEST_PASSWORD, false)
			.mount(TEST_MOUNT_NAME, "test-data/small-collection")
			.build()
			.await;

		ctx.scanner.run_scan().await.unwrap();

		let songs = list_all_songs(&ctx).await;

		ctx.playlist_manager
			.save_playlist(TEST_PLAYLIST_NAME, TEST_USER, songs)
			.await
			.unwrap();

		let playlist = ctx
			.playlist_manager
			.read_playlist(TEST_PLAYLIST_NAME, TEST_USER)
			.await
			.unwrap();

		// Song order must be preserved by save/read round-trips.
		assert_eq!(playlist.songs.len(), 13);
		let first_song_path: PathBuf = [
			TEST_MOUNT_NAME,
			"Khemmis",
			"Hunted",
			"01 - Above The Water.mp3",
		]
		.iter()
		.collect();
		assert_eq!(playlist.songs[0], first_song_path);
	}

	#[tokio::test]
	async fn playlists_are_sorted_alphabetically() {
		let ctx = test::ContextBuilder::new(test_name!())
			.user(TEST_USER, TEST_PASSWORD, false)
			.mount(TEST_MOUNT_NAME, "test-data/small-collection")
			.build()
			.await;

		for name in ["ax", "b", "Ay", "B", "àz"] {
			ctx.playlist_manager
				.save_playlist(name, TEST_USER, Vec::new())
				.await
				.unwrap();
		}

		let playlists = ctx
			.playlist_manager
			.list_playlists(TEST_USER)
			.await
			.unwrap();

		// Expected order exercises case-insensitivity and accent handling.
		let names = playlists
			.into_iter()
			.map(|p| p.name.to_string())
			.collect::<Vec<_>>();
		assert_eq!(names, vec!["ax", "Ay", "àz", "B", "b"]);
	}
}

619
src/app/scanner.rs Normal file
View file

@ -0,0 +1,619 @@
use log::{error, info};
use notify::{RecommendedWatcher, Watcher};
use notify_debouncer_full::{Debouncer, FileIdMap};
use rayon::{Scope, ThreadPoolBuilder};
use regex::Regex;
use std::fs;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender, TryRecvError};
use std::sync::Arc;
use std::time::SystemTime;
use std::{cmp::min, time::Duration};
use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::{Notify, RwLock};
use tokio::task::JoinSet;
use tokio::time::Instant;
use crate::app::{config, formats, index, Error};
/// A directory discovered during collection traversal, identified by its
/// path within the virtual (mounted) filesystem.
#[derive(Debug, PartialEq, Eq)]
pub struct Directory {
	pub virtual_path: PathBuf,
}
/// A song discovered during collection traversal, with the metadata read
/// from its tags.
#[derive(Debug, Default, PartialEq, Eq)]
pub struct Song {
	// Location on disk.
	pub real_path: PathBuf,
	// Location within the virtual (mounted) filesystem.
	pub virtual_path: PathBuf,
	pub track_number: Option<i64>,
	pub disc_number: Option<i64>,
	pub title: Option<String>,
	pub artists: Vec<String>,
	pub album_artists: Vec<String>,
	pub year: Option<i64>,
	pub album: Option<String>,
	// Virtual path of the artwork to use for this song, if any.
	pub artwork: Option<PathBuf>,
	pub duration: Option<i64>,
	pub lyricists: Vec<String>,
	pub composers: Vec<String>,
	pub genres: Vec<String>,
	pub labels: Vec<String>,
	// Seconds since the Unix epoch; file creation time, falling back to mtime.
	pub date_added: i64,
}
/// Lifecycle of the collection scanner.
#[derive(Clone, Default)]
pub enum State {
	// No scan has happened yet.
	#[default]
	Initial,
	// A change was detected; a scan will start once changes settle.
	Pending,
	InProgress,
	UpToDate,
}
/// The subset of configuration a scan depends on; used to detect when a
/// config change requires re-scanning.
#[derive(Clone)]
struct Parameters {
	artwork_regex: Option<Regex>,
	mount_dirs: Vec<config::MountDir>,
}
impl PartialEq for Parameters {
fn eq(&self, other: &Self) -> bool {
self.artwork_regex.as_ref().map(|r| r.as_str())
== other.artwork_regex.as_ref().map(|r| r.as_str())
&& self.mount_dirs == other.mount_dirs
}
}
/// Snapshot of scanner progress, exposed via `Scanner::get_status`.
#[derive(Clone, Default)]
pub struct Status {
	pub state: State,
	pub last_start_time: Option<SystemTime>,
	pub last_end_time: Option<SystemTime>,
	// Running count of songs indexed by the scan currently in progress.
	pub num_songs_indexed: u32,
}
/// Watches the configured mount directories and rebuilds the collection
/// index when files or relevant configuration change. Cheap to clone; all
/// state is shared behind `Arc`s.
#[derive(Clone)]
pub struct Scanner {
	index_manager: index::Manager,
	config_manager: config::Manager,
	// Active filesystem watcher; replaced on every scan so new mounts are covered.
	file_watcher: Arc<RwLock<Option<Debouncer<RecommendedWatcher, FileIdMap>>>>,
	// Signaled by the file watcher when filesystem activity is detected.
	on_file_change: Arc<Notify>,
	// Signaled when a scan should run.
	pending_scan: Arc<Notify>,
	status: Arc<RwLock<Status>>,
	// Parameters used by the most recent scan, for change detection.
	parameters: Arc<RwLock<Option<Parameters>>>,
}
impl Scanner {
	/// Creates the scanner and spawns its two background tasks: one that
	/// debounces change notifications, and one that runs queued scans.
	pub async fn new(
		index_manager: index::Manager,
		config_manager: config::Manager,
	) -> Result<Self, Error> {
		let scanner = Self {
			index_manager,
			config_manager: config_manager.clone(),
			file_watcher: Arc::default(),
			on_file_change: Arc::default(),
			pending_scan: Arc::new(Notify::new()),
			status: Arc::new(RwLock::new(Status::default())),
			parameters: Arc::default(),
		};

		let abort_scan = Arc::new(Notify::new());

		// Debounce task: on any change, abort an in-flight scan, then wait until
		// two seconds elapse with no further changes before queuing a new scan.
		tokio::spawn({
			let scanner = scanner.clone();
			let abort_scan = abort_scan.clone();
			async move {
				loop {
					scanner.wait_for_change().await;
					abort_scan.notify_waiters();
					scanner.status.write().await.state = State::Pending;
					while tokio::time::timeout(Duration::from_secs(2), scanner.wait_for_change())
						.await
						.is_ok()
					{}
					scanner.pending_scan.notify_waiters();
				}
			}
		});

		// Scan task: runs queued scans; a scan loses the race (and is dropped)
		// when the debounce task signals an abort.
		tokio::spawn({
			let scanner = scanner.clone();
			async move {
				loop {
					scanner.pending_scan.notified().await;
					tokio::select! {
						result = scanner.run_scan() => {
							if let Err(e) = result {
								error!("Error while updating index: {e}");
							}
						}
						_ = abort_scan.notified() => {
							info!("Interrupted index update");
						}
					};
				}
			}
		});

		Ok(scanner)
	}

	// Creates a debounced filesystem watcher covering every mount directory.
	// Failure to watch an individual mount is logged but not fatal.
	async fn setup_file_watcher(
		config_manager: &config::Manager,
		on_file_changed: Arc<Notify>,
	) -> Result<Debouncer<RecommendedWatcher, FileIdMap>, Error> {
		let mut debouncer =
			notify_debouncer_full::new_debouncer(Duration::from_millis(100), None, move |_| {
				on_file_changed.notify_waiters();
			})?;

		let mount_dirs = config_manager.get_mounts().await;
		for mount_dir in &mount_dirs {
			if let Err(e) = debouncer
				.watcher()
				.watch(&mount_dir.source, notify::RecursiveMode::Recursive)
			{
				error!("Failed to setup file watcher for `{mount_dir:#?}`: {e}");
			}
		}

		Ok(debouncer)
	}

	// Resolves when either a config change affects scan parameters, or the
	// file watcher reports filesystem activity.
	async fn wait_for_change(&self) {
		tokio::select! {
			_ = async {
				loop {
					self.config_manager.on_config_change().await;
					// Ignore config changes that do not alter scan parameters.
					if *self.parameters.read().await == Some(self.read_parameters().await) {
						continue;
					}
					break;
				}
			} => {},
			_ = self.on_file_change.notified() => {},
		}
	}

	async fn read_parameters(&self) -> Parameters {
		let album_art_pattern = self.config_manager.get_index_album_art_pattern().await;
		// `(?i)` makes artwork filename matching case-insensitive; an invalid
		// pattern disables filename-based artwork detection.
		let artwork_regex = Regex::new(&format!("(?i){}", &album_art_pattern)).ok();
		Parameters {
			artwork_regex,
			mount_dirs: self.config_manager.get_mounts().await,
		}
	}

	pub async fn get_status(&self) -> Status {
		self.status.read().await.clone()
	}

	/// Requests a scan. `notify_one` stores a permit, so the scan runs even
	/// if the scan task is not currently waiting.
	pub fn queue_scan(&self) {
		self.pending_scan.notify_one();
	}

	/// Wakes the scan task only if it is currently idle and waiting;
	/// otherwise this is a no-op.
	pub fn try_trigger_scan(&self) {
		self.pending_scan.notify_waiters();
	}

	/// Traverses all mounts, rebuilds the collection index, persists it and
	/// swaps it in. Progress is reported through `get_status`.
	pub async fn run_scan(&self) -> Result<(), Error> {
		info!("Beginning collection scan");

		let start = Instant::now();
		{
			let mut status = self.status.write().await;
			status.last_start_time = Some(SystemTime::now());
			status.state = State::InProgress;
			status.num_songs_indexed = 0;
		}

		let was_empty = self.index_manager.is_index_empty().await;
		let mut partial_update_time = Instant::now();

		let new_parameters = self.read_parameters().await;
		*self.parameters.write().await = Some(new_parameters.clone());

		// Traversal results stream through these channels into the index builder.
		let (scan_directories_output, collection_directories_input) = channel();
		let (scan_songs_output, collection_songs_input) = channel();

		let scan = Scan::new(scan_directories_output, scan_songs_output, new_parameters);

		let mut scan_task_set = JoinSet::new();
		let mut index_task_set = JoinSet::new();
		let mut watch_task_set = JoinSet::<Result<(), Error>>::new();
		let mut secondary_task_set = JoinSet::new();

		// Filesystem traversal is blocking work.
		scan_task_set.spawn_blocking(|| scan.run());

		// Recreate the file watcher so newly added mounts are covered.
		watch_task_set.spawn({
			let scanner = self.clone();
			let config_manager = self.config_manager.clone();
			async move {
				let mut watcher = scanner.file_watcher.write().await;
				*watcher = None; // Drops previous watcher
				*watcher = Some(
					Self::setup_file_watcher(&config_manager, scanner.on_file_change.clone())
						.await?,
				);
				Ok(())
			}
		});

		// Promotes partial indexes handed over by the index task, so a
		// first-time scan shows results before it completes.
		let partial_index_notify = Arc::new(tokio::sync::Notify::new());
		let partial_index_mutex = Arc::new(tokio::sync::Mutex::new(index::Builder::default()));
		secondary_task_set.spawn({
			let index_manager = self.index_manager.clone();
			let partial_index_notify = partial_index_notify.clone();
			let partial_index_mutex = partial_index_mutex.clone();
			async move {
				loop {
					partial_index_notify.notified().await;
					let mut partial_index = partial_index_mutex.clone().lock_owned().await;
					let partial_index =
						std::mem::replace(&mut *partial_index, index::Builder::new());
					let partial_index = partial_index.build();
					let num_songs = partial_index.collection.num_songs();
					index_manager.clone().replace_index(partial_index).await;
					info!("Promoted partial collection index ({num_songs} songs)");
				}
			}
		});

		// Mirrors the indexed-song count into the public status struct.
		let (status_sender, mut status_receiver) = unbounded_channel();
		secondary_task_set.spawn({
			let manager = self.clone();
			async move {
				loop {
					match status_receiver.recv().await {
						Some(n) => {
							manager.status.write().await.num_songs_indexed = n;
						}
						None => break,
					}
				}
			}
		});

		// Drains both traversal channels into an index builder. Runs until
		// both channels disconnect (i.e. traversal has finished).
		index_task_set.spawn_blocking(move || {
			let mut index_builder = index::Builder::default();
			let mut num_songs_scanned = 0;

			loop {
				let exhausted_songs = match collection_songs_input.try_recv() {
					Ok(song) => {
						index_builder.add_song(song);
						num_songs_scanned += 1;
						status_sender.send(num_songs_scanned).ok();
						false
					}
					Err(TryRecvError::Empty) => {
						std::thread::sleep(Duration::from_millis(1));
						false
					}
					Err(TryRecvError::Disconnected) => true,
				};

				let exhausted_directories = match collection_directories_input.try_recv() {
					Ok(directory) => {
						index_builder.add_directory(directory);
						false
					}
					Err(TryRecvError::Empty) => false,
					Err(TryRecvError::Disconnected) => true,
				};

				if exhausted_directories && exhausted_songs {
					break;
				}

				// On a first-time scan, hand a copy of the in-progress index
				// over for promotion at most every five seconds.
				if was_empty && partial_update_time.elapsed().as_secs() > 5 {
					if let Ok(mut m) = partial_index_mutex.clone().try_lock_owned() {
						*m = index_builder.clone();
						partial_index_notify.notify_one();
						partial_update_time = Instant::now()
					}
				}
			}

			index_builder.build()
		});

		scan_task_set.join_next().await.unwrap()??;
		watch_task_set.join_next().await.unwrap()??;
		let index = index_task_set.join_next().await.unwrap()?;
		secondary_task_set.abort_all();

		self.index_manager.persist_index(&index).await?;
		self.index_manager.replace_index(index).await;

		{
			let mut status = self.status.write().await;
			status.state = State::UpToDate;
			status.last_end_time = Some(SystemTime::now());
		}

		info!(
			"Collection scan took {} seconds",
			start.elapsed().as_millis() as f32 / 1000.0
		);

		Ok(())
	}
}
// One traversal of all mount points. Results are streamed through channels
// so indexing can proceed concurrently with directory traversal.
struct Scan {
	directories_output: Sender<Directory>,
	songs_output: Sender<Song>,
	parameters: Parameters,
}
impl Scan {
pub fn new(
directories_output: Sender<Directory>,
songs_output: Sender<Song>,
parameters: Parameters,
) -> Self {
Self {
directories_output,
songs_output,
parameters,
}
}
pub fn run(self) -> Result<(), Error> {
let key = "POLARIS_NUM_TRAVERSER_THREADS";
let num_threads = std::env::var_os(key)
.map(|v| v.to_string_lossy().to_string())
.and_then(|v| usize::from_str(&v).ok())
.unwrap_or_else(|| min(num_cpus::get(), 8));
info!("Browsing collection using {} threads", num_threads);
let directories_output = self.directories_output.clone();
let songs_output = self.songs_output.clone();
let artwork_regex = self.parameters.artwork_regex.clone();
let thread_pool = ThreadPoolBuilder::new().num_threads(num_threads).build()?;
thread_pool.scope({
|scope| {
for mount in self.parameters.mount_dirs {
scope.spawn(|scope| {
process_directory(
scope,
mount.source,
mount.name,
directories_output.clone(),
songs_output.clone(),
artwork_regex.clone(),
);
});
}
}
});
Ok(())
}
}
// Recursively walks `real_path`, emitting one `Directory` for this folder and
// one `Song` per recognized audio file. Subdirectories are traversed in
// parallel via the rayon scope. I/O errors are logged and skipped rather than
// aborting the walk.
fn process_directory<P: AsRef<Path>, Q: AsRef<Path>>(
	scope: &Scope,
	real_path: P,
	virtual_path: Q,
	directories_output: Sender<Directory>,
	songs_output: Sender<Song>,
	artwork_regex: Option<Regex>,
) {
	let read_dir = match fs::read_dir(&real_path) {
		Ok(read_dir) => read_dir,
		Err(e) => {
			error!(
				"Directory read error for `{}`: {}",
				real_path.as_ref().display(),
				e
			);
			return;
		}
	};

	let mut songs = vec![];
	let mut artwork_file = None;

	for entry in read_dir {
		let entry = match entry {
			Ok(e) => e,
			Err(e) => {
				error!(
					"File read error within `{}`: {}",
					real_path.as_ref().display(),
					e
				);
				continue;
			}
		};

		let is_dir = match entry.file_type().map(|f| f.is_dir()) {
			Ok(d) => d,
			Err(e) => {
				error!(
					"Could not determine file type for `{}`: {}",
					entry.path().to_string_lossy(),
					e
				);
				continue;
			}
		};

		let name = entry.file_name();
		let entry_real_path = real_path.as_ref().join(&name);
		let entry_virtual_path = virtual_path.as_ref().join(&name);

		if is_dir {
			// Recurse on another rayon worker thread.
			scope.spawn({
				let directories_output = directories_output.clone();
				let songs_output = songs_output.clone();
				let artwork_regex = artwork_regex.clone();
				|scope| {
					process_directory(
						scope,
						entry_real_path,
						entry_virtual_path,
						directories_output,
						songs_output,
						artwork_regex,
					);
				}
			});
		} else if let Some(metadata) = formats::read_metadata(&entry_real_path) {
			songs.push(Song {
				real_path: entry_real_path.clone(),
				virtual_path: entry_virtual_path.clone(),
				track_number: metadata.track_number.map(|n| n as i64),
				disc_number: metadata.disc_number.map(|n| n as i64),
				title: metadata.title,
				artists: metadata.artists,
				album_artists: metadata.album_artists,
				year: metadata.year.map(|n| n as i64),
				album: metadata.album,
				// Songs with embedded artwork point at themselves.
				artwork: metadata.has_artwork.then(|| entry_virtual_path.clone()),
				duration: metadata.duration.map(|n| n as i64),
				lyricists: metadata.lyricists,
				composers: metadata.composers,
				genres: metadata.genres,
				labels: metadata.labels,
				date_added: get_date_created(&entry_real_path).unwrap_or_default(),
			});
		} else if artwork_file.is_none()
			&& artwork_regex
				.as_ref()
				.is_some_and(|r| r.is_match(name.to_str().unwrap_or_default()))
		{
			// First non-audio file matching the artwork pattern wins.
			artwork_file = Some(entry_virtual_path);
		}
	}

	// Songs are emitted only after the whole directory has been read, so
	// folder artwork discovered late still applies to earlier songs.
	for mut song in songs {
		song.artwork = song.artwork.or_else(|| artwork_file.clone());
		songs_output.send(song).ok();
	}

	directories_output
		.send(Directory {
			virtual_path: virtual_path.as_ref().to_owned(),
		})
		.ok();
}
/// Returns the file's creation time as seconds since the Unix epoch, falling
/// back to the modification time on filesystems that do not record creation
/// timestamps. Returns `None` when the file is inaccessible or neither
/// timestamp is available.
fn get_date_created<P: AsRef<Path>>(path: P) -> Option<i64> {
	fs::metadata(path)
		.and_then(|m| m.created().or_else(|_| m.modified()))
		.ok()?
		.duration_since(std::time::UNIX_EPOCH)
		.map(|d| d.as_secs() as i64)
		.ok()
}
#[cfg(test)]
mod test {
	use std::path::PathBuf;

	use crate::app::test::{self};
	use crate::test_name;

	use super::*;

	#[tokio::test]
	async fn scan_finds_songs_and_directories() {
		let (directories_sender, directories_receiver) = channel();
		let (songs_sender, songs_receiver) = channel();
		let parameters = Parameters {
			artwork_regex: None,
			mount_dirs: vec![config::MountDir {
				source: ["test-data", "small-collection"].iter().collect(),
				name: "root".to_owned(),
			}],
		};
		let scan = Scan::new(directories_sender, songs_sender, parameters);
		scan.run().unwrap();

		let directories = directories_receiver.iter().collect::<Vec<_>>();
		assert_eq!(directories.len(), 6);
		let songs = songs_receiver.iter().collect::<Vec<_>>();
		assert_eq!(songs.len(), 13);
	}

	#[tokio::test]
	async fn scan_finds_embedded_artwork() {
		let (directories_sender, _) = channel();
		let (songs_sender, songs_receiver) = channel();
		let parameters = Parameters {
			artwork_regex: None,
			mount_dirs: vec![config::MountDir {
				source: ["test-data", "small-collection"].iter().collect(),
				name: "root".to_owned(),
			}],
		};
		let scan = Scan::new(directories_sender, songs_sender, parameters);
		scan.run().unwrap();

		let songs = songs_receiver.iter().collect::<Vec<_>>();
		// Bug fix: the boolean returned by `any` was previously discarded,
		// so this test could never fail.
		assert!(songs
			.iter()
			.any(|s| s.artwork.as_ref() == Some(&s.virtual_path)));
	}

	#[tokio::test]
	async fn album_art_pattern_is_case_insensitive() {
		let artwork_path = PathBuf::from_iter(["root", "Khemmis", "Hunted", "Folder.jpg"]);

		let patterns = vec!["folder", "FOLDER"];
		for pattern in patterns.into_iter() {
			let (directories_sender, _) = channel();
			let (songs_sender, songs_receiver) = channel();
			let parameters = Parameters {
				artwork_regex: Some(Regex::new(pattern).unwrap()),
				mount_dirs: vec![config::MountDir {
					source: ["test-data", "small-collection"].iter().collect(),
					name: "root".to_owned(),
				}],
			};
			let scan = Scan::new(directories_sender, songs_sender, parameters);
			scan.run().unwrap();

			let songs = songs_receiver.iter().collect::<Vec<_>>();
			// Bug fix: the boolean returned by `any` was previously discarded,
			// so this test could never fail.
			assert!(songs
				.iter()
				.any(|s| s.artwork.as_ref() == Some(&artwork_path)));
		}
	}

	#[tokio::test]
	async fn scanner_reacts_to_config_changes() {
		let ctx = test::ContextBuilder::new(test_name!()).build().await;
		assert!(ctx.index_manager.is_index_empty().await);

		ctx.config_manager
			.set_mounts(vec![config::storage::MountDir {
				source: ["test-data", "small-collection"].iter().collect(),
				name: "root".to_owned(),
			}])
			.await
			.unwrap();

		// Poll until the background scan triggered by the config change lands.
		tokio::time::timeout(Duration::from_secs(10), async {
			loop {
				tokio::time::sleep(Duration::from_millis(100)).await;
				if !ctx.index_manager.is_index_empty().await {
					break;
				}
			}
		})
		.await
		.expect("Index did not populate");
	}
}

68
src/app/test.rs Normal file
View file

@ -0,0 +1,68 @@
use std::path::PathBuf;
use crate::app::config::storage::*;
use crate::app::{auth, config, index, ndb, playlist, scanner};
use crate::test::*;
/// Bundle of application managers wired against a throwaway test directory.
pub struct Context {
	pub index_manager: index::Manager,
	pub scanner: scanner::Scanner,
	pub config_manager: config::Manager,
	pub playlist_manager: playlist::Manager,
}
/// Builder accumulating a test configuration (users, mounts) before
/// constructing a `Context`.
pub struct ContextBuilder {
	config: Config,
	pub test_directory: PathBuf,
}
impl ContextBuilder {
	pub fn new(test_name: String) -> Self {
		Self {
			test_directory: prepare_test_directory(test_name),
			config: Config::default(),
		}
	}

	/// Adds a user to the configuration that `build` will apply.
	pub fn user(mut self, name: &str, password: &str, is_admin: bool) -> Self {
		self.config.users.push(User {
			name: name.to_owned(),
			initial_password: Some(password.to_owned()),
			admin: Some(is_admin),
			..Default::default()
		});
		self
	}

	/// Adds a mount directory to the configuration that `build` will apply.
	pub fn mount(mut self, name: &str, source: &str) -> Self {
		self.config.mount_dirs.push(MountDir {
			name: name.to_owned(),
			source: PathBuf::from(source),
		});
		self
	}

	/// Instantiates all managers against the test directory. The accumulated
	/// config is applied only after every manager exists.
	pub async fn build(self) -> Context {
		let config_path = self.test_directory.join("polaris.toml");

		let auth_secret = auth::Secret::default();
		let config_manager = config::Manager::new(&config_path, auth_secret)
			.await
			.unwrap();
		let ndb_manager = ndb::Manager::new(&self.test_directory).unwrap();
		let index_manager = index::Manager::new(&self.test_directory).await.unwrap();
		let scanner = scanner::Scanner::new(index_manager.clone(), config_manager.clone())
			.await
			.unwrap();
		let playlist_manager = playlist::Manager::new(ndb_manager.clone());

		config_manager.apply_config(self.config).await.unwrap();

		Context {
			index_manager,
			scanner,
			config_manager,
			playlist_manager,
		}
	}
}

274
src/app/thumbnail.rs Normal file
View file

@ -0,0 +1,274 @@
use std::cmp;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::path::{Path, PathBuf};
use image::codecs::jpeg::JpegEncoder;
use image::{DynamicImage, GenericImage, GenericImageView, ImageBuffer};
use tokio::task::spawn_blocking;
use crate::app::Error;
use crate::utils::{get_audio_format, AudioFormat};
/// Thumbnail generation settings. Hashed (with the source path) to form the
/// cache key, so distinct options yield distinct cached files.
#[derive(Clone, Debug, Hash)]
pub struct Options {
	// Output size cap; `None` keeps the source's largest dimension.
	pub max_dimension: Option<u32>,
	pub resize_if_almost_square: bool,
	pub pad_to_square: bool,
}
impl Default for Options {
	// Defaults produce a 400px square thumbnail.
	fn default() -> Self {
		Self {
			max_dimension: Some(400),
			resize_if_almost_square: true,
			pad_to_square: true,
		}
	}
}
/// Generates thumbnails on demand and caches them as JPEG files on disk.
#[derive(Clone)]
pub struct Manager {
	thumbnails_dir_path: PathBuf,
}
impl Manager {
pub fn new(thumbnails_dir_path: PathBuf) -> Self {
Self {
thumbnails_dir_path,
}
}
pub async fn get_thumbnail(
&self,
image_path: &Path,
options: &Options,
) -> Result<PathBuf, Error> {
match self.read_from_cache(image_path, options).await {
Some(path) => Ok(path),
None => self.read_from_source(image_path, options).await,
}
}
fn get_thumbnail_path(&self, image_path: &Path, options: &Options) -> PathBuf {
let hash = Manager::hash(image_path, options);
let mut thumbnail_path = self.thumbnails_dir_path.clone();
thumbnail_path.push(format!("{}.jpg", hash));
thumbnail_path
}
async fn read_from_cache(&self, image_path: &Path, options: &Options) -> Option<PathBuf> {
let path = self.get_thumbnail_path(image_path, options);
match tokio::fs::try_exists(&path).await.ok() {
Some(true) => Some(path),
_ => None,
}
}
async fn read_from_source(
&self,
image_path: &Path,
options: &Options,
) -> Result<PathBuf, Error> {
let thumbnail = spawn_blocking({
let image_path = image_path.to_owned();
let options = options.clone();
move || generate_thumbnail(&image_path, &options)
})
.await??;
tokio::fs::create_dir_all(&self.thumbnails_dir_path)
.await
.map_err(|e| Error::Io(self.thumbnails_dir_path.clone(), e))?;
let path = self.get_thumbnail_path(image_path, options);
let out_file = tokio::fs::File::create(&path)
.await
.map_err(|e| Error::Io(self.thumbnails_dir_path.clone(), e))?;
spawn_blocking({
let mut out_file = out_file.into_std().await;
move || {
let quality = 80;
thumbnail.write_with_encoder(JpegEncoder::new_with_quality(&mut out_file, quality))
}
})
.await?
.map_err(|e| Error::Image(image_path.to_owned(), e))?;
Ok(path)
}
fn hash(path: &Path, options: &Options) -> u64 {
let mut hasher = DefaultHasher::new();
path.hash(&mut hasher);
options.hash(&mut hasher);
hasher.finish()
}
}
// Decodes `image_path` and produces a thumbnail per `options`: near-square
// sources may be stretched to an exact square; otherwise the image is either
// centered on a white square canvas or simply scaled down.
fn generate_thumbnail(image_path: &Path, options: &Options) -> Result<DynamicImage, Error> {
	let source_image = DynamicImage::ImageRgb8(read(image_path)?.into_rgb8());
	let (source_width, source_height) = source_image.dimensions();
	let largest_dimension = cmp::max(source_width, source_height);

	// Never upscale: the output is capped at the source's largest dimension.
	let out_dimension = cmp::min(
		options.max_dimension.unwrap_or(largest_dimension),
		largest_dimension,
	);

	let source_aspect_ratio: f32 = source_width as f32 / source_height as f32;
	let is_almost_square = source_aspect_ratio > 0.8 && source_aspect_ratio < 1.2;

	let mut final_image;
	if is_almost_square && options.resize_if_almost_square {
		// Stretch slightly non-square sources into an exact square.
		final_image = source_image.thumbnail_exact(out_dimension, out_dimension);
	} else if options.pad_to_square {
		// Center the scaled image on a white square canvas.
		let scaled_image = source_image.thumbnail(out_dimension, out_dimension);
		let (scaled_width, scaled_height) = scaled_image.dimensions();
		let background = image::Rgb([255, 255_u8, 255_u8]);
		final_image = DynamicImage::ImageRgb8(ImageBuffer::from_pixel(
			out_dimension,
			out_dimension,
			background,
		));
		final_image
			.copy_from(
				&scaled_image,
				(out_dimension - scaled_width) / 2,
				(out_dimension - scaled_height) / 2,
			)
			.map_err(|e| Error::Image(image_path.to_owned(), e))?;
	} else {
		final_image = source_image.thumbnail(out_dimension, out_dimension);
	}

	Ok(final_image)
}
// Loads artwork for a path: recognized audio formats are dispatched to a
// per-format embedded-artwork reader; anything else is opened as a regular
// image file.
fn read(image_path: &Path) -> Result<DynamicImage, Error> {
	match get_audio_format(image_path) {
		Some(AudioFormat::AIFF) => read_aiff(image_path),
		Some(AudioFormat::FLAC) => read_flac(image_path),
		Some(AudioFormat::MP3) => read_mp3(image_path),
		Some(AudioFormat::OGG) => read_vorbis(image_path),
		Some(AudioFormat::OPUS) => read_opus(image_path),
		Some(AudioFormat::WAVE) => read_wave(image_path),
		Some(AudioFormat::APE) | Some(AudioFormat::MPC) => read_ape(image_path),
		Some(AudioFormat::MP4) | Some(AudioFormat::M4B) => read_mp4(image_path),
		None => image::open(image_path).map_err(|e| Error::Image(image_path.to_owned(), e)),
	}
}
// Embedded artwork extraction is not implemented for APE files.
fn read_ape(_: &Path) -> Result<DynamicImage, Error> {
	Err(Error::UnsupportedFormat("ape"))
}
// Extracts the first embedded picture from a FLAC file's metadata blocks.
fn read_flac(path: &Path) -> Result<DynamicImage, Error> {
	let tag =
		metaflac::Tag::read_from_path(path).map_err(|e| Error::Metaflac(path.to_owned(), e))?;
	if let Some(p) = tag.pictures().next() {
		return image::load_from_memory(&p.data).map_err(|e| Error::Image(path.to_owned(), e));
	}
	Err(Error::EmbeddedArtworkNotFound(path.to_owned()))
}
// MP3 artwork lives in ID3 tags.
fn read_mp3(path: &Path) -> Result<DynamicImage, Error> {
	let tag = id3::Tag::read_from_path(path).map_err(|e| Error::Id3(path.to_owned(), e))?;
	read_id3(path, &tag)
}
// AIFF artwork lives in ID3 tags.
fn read_aiff(path: &Path) -> Result<DynamicImage, Error> {
	let tag = id3::Tag::read_from_path(path).map_err(|e| Error::Id3(path.to_owned(), e))?;
	read_id3(path, &tag)
}
// WAVE artwork lives in ID3 tags.
fn read_wave(path: &Path) -> Result<DynamicImage, Error> {
	let tag = id3::Tag::read_from_path(path).map_err(|e| Error::Id3(path.to_owned(), e))?;
	read_id3(path, &tag)
}
fn read_id3(path: &Path, tag: &id3::Tag) -> Result<DynamicImage, Error> {
tag.pictures()
.next()
.ok_or_else(|| Error::EmbeddedArtworkNotFound(path.to_owned()))
.and_then(|d| {
image::load_from_memory(&d.data).map_err(|e| Error::Image(path.to_owned(), e))
})
}
// Extracts the cover art atom from an MP4/M4A/M4B container.
fn read_mp4(path: &Path) -> Result<DynamicImage, Error> {
	let tag =
		mp4ameta::Tag::read_from_path(path).map_err(|e| Error::Mp4aMeta(path.to_owned(), e))?;
	tag.artwork()
		.ok_or_else(|| Error::EmbeddedArtworkNotFound(path.to_owned()))
		.and_then(|d| image::load_from_memory(d.data).map_err(|e| Error::Image(path.to_owned(), e)))
}
// Embedded artwork extraction is not implemented for Ogg Vorbis files.
fn read_vorbis(_: &Path) -> Result<DynamicImage, Error> {
	Err(Error::UnsupportedFormat("vorbis"))
}
// Embedded artwork extraction is not implemented for Opus files.
fn read_opus(_: &Path) -> Result<DynamicImage, Error> {
	Err(Error::UnsupportedFormat("opus"))
}
#[cfg(test)]
mod test {
	use super::*;

	#[test]
	fn can_read_artwork_data() {
		// Reference images: `Folder.png` is a stand-alone artwork file,
		// `Embedded.png` matches the artwork embedded in the audio samples.
		let ext_img = image::open("test-data/artwork/Folder.png")
			.unwrap()
			.to_rgb8();
		let embedded_img = image::open("test-data/artwork/Embedded.png")
			.unwrap()
			.to_rgb8();

		let folder_img = read(Path::new("test-data/artwork/Folder.png"))
			.unwrap()
			.to_rgb8();
		assert_eq!(folder_img, ext_img);

		let aiff_img = read(Path::new("test-data/artwork/sample.aif"))
			.unwrap()
			.to_rgb8();
		assert_eq!(aiff_img, embedded_img);

		// APE artwork extraction is unsupported and expected to fail.
		let ape_img = read(Path::new("test-data/artwork/sample.ape"))
			.map(|d| d.to_rgb8())
			.ok();
		assert_eq!(ape_img, None);

		let flac_img = read(Path::new("test-data/artwork/sample.flac"))
			.unwrap()
			.to_rgb8();
		assert_eq!(flac_img, embedded_img);

		let mp3_img = read(Path::new("test-data/artwork/sample.mp3"))
			.unwrap()
			.to_rgb8();
		assert_eq!(mp3_img, embedded_img);

		let m4a_img = read(Path::new("test-data/artwork/sample.m4a"))
			.unwrap()
			.to_rgb8();
		assert_eq!(m4a_img, embedded_img);

		// Vorbis and Opus artwork extraction are unsupported and expected to fail.
		let ogg_img = read(Path::new("test-data/artwork/sample.ogg"))
			.map(|d| d.to_rgb8())
			.ok();
		assert_eq!(ogg_img, None);

		let opus_img = read(Path::new("test-data/artwork/sample.opus"))
			.map(|d| d.to_rgb8())
			.ok();
		assert_eq!(opus_img, None);

		let wave_img = read(Path::new("test-data/artwork/sample.wav"))
			.unwrap()
			.to_rgb8();
		assert_eq!(wave_img, embedded_img);
	}
}

View file

@ -1,425 +0,0 @@
use core::ops::Deref;
use diesel;
use diesel::prelude::*;
use regex::Regex;
use serde_json;
use std::fs;
use std::io::Read;
use std::path;
use toml;
use db::DB;
use db::ConnectionSource;
use db::{ddns_config, misc_settings, mount_points, users};
use ddns::DDNSConfig;
use errors::*;
use user::*;
use vfs::MountPoint;
// Row of the `misc_settings` table (legacy diesel schema).
#[derive(Debug, Queryable)]
pub struct MiscSettings {
	id: i32,
	pub auth_secret: String,
	pub index_sleep_duration_seconds: i32,
	pub index_album_art_pattern: String,
}
// User entry as it appears in serialized configuration. When read back from
// the database, `password` is always blank.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConfigUser {
	pub name: String,
	pub password: String,
	pub admin: bool,
}
// Top-level configuration document; every section is optional so partial
// configs can be used to amend existing settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Config {
	pub album_art_pattern: Option<String>,
	pub reindex_every_n_seconds: Option<i32>,
	pub mount_dirs: Option<Vec<MountPoint>>,
	pub users: Option<Vec<ConfigUser>>,
	pub ydns: Option<DDNSConfig>,
}
impl Config {
	// Normalizes each mount directory path (via `clean_path_string`, defined
	// elsewhere in this crate) and rejects paths that are not valid UTF-8.
	fn clean_paths(&mut self) -> Result<()> {
		if let Some(ref mut mount_dirs) = self.mount_dirs {
			for mount_dir in mount_dirs {
				match clean_path_string(&mount_dir.source).to_str() {
					Some(p) => mount_dir.source = p.to_owned(),
					_ => bail!("Bad mount directory path"),
				}
			}
		}
		Ok(())
	}
}
/// Parses a JSON document into a `Config`, normalizing mount paths.
pub fn parse_json(content: &str) -> Result<Config> {
	let mut config = serde_json::from_str::<Config>(content)?;
	config.clean_paths()?;
	Ok(config)
}
/// Loads and parses a TOML configuration file, normalizing mount paths.
pub fn parse_toml_file(path: &path::Path) -> Result<Config> {
	println!("Config file path: {}", path.to_string_lossy());
	// `fs::read_to_string` replaces the manual open/read_to_string dance and
	// surfaces the same io::Error on failure.
	let config_file_content = fs::read_to_string(path)?;
	let mut config = toml::de::from_str::<Config>(&config_file_content)?;
	config.clean_paths()?;
	Ok(config)
}
/// Assembles a `Config` snapshot from the settings tables
/// (misc_settings, mount_points, users, ddns_config).
/// Password hashes are never exported: users come back with empty passwords.
pub fn read<T>(db: &T) -> Result<Config>
	where T: ConnectionSource
{
	use self::misc_settings::dsl::*;
	use self::mount_points::dsl::*;
	use self::ddns_config::dsl::*;

	let connection = db.get_connection();

	let mut config = Config {
		album_art_pattern: None,
		reindex_every_n_seconds: None,
		mount_dirs: None,
		users: None,
		ydns: None,
	};

	// Scalar settings live in the single-row misc_settings table.
	let (art_pattern, sleep_duration) = misc_settings
		.select((index_album_art_pattern, index_sleep_duration_seconds))
		.get_result(connection.deref())?;

	config.album_art_pattern = Some(art_pattern);
	config.reindex_every_n_seconds = Some(sleep_duration);

	let mount_dirs = mount_points
		.select((source, name))
		.get_results(connection.deref())?;
	config.mount_dirs = Some(mount_dirs);

	// `admin` is stored as an integer (0/1) in SQLite; map it back to bool.
	let found_users: Vec<(String, i32)> = users::table
		.select((users::columns::name, users::columns::admin))
		.get_results(connection.deref())?;
	config.users = Some(found_users
		.into_iter()
		.map(|(n, a)| {
			ConfigUser {
				name: n,
				password: "".to_owned(), // never export credentials
				admin: a != 0,
			}
		})
		.collect::<_>());

	let ydns = ddns_config
		.select((host, username, password))
		.get_result(connection.deref())?;
	config.ydns = Some(ydns);

	Ok(config)
}
/// Wipes all user-editable configuration: removes every mount point and user,
/// and blanks out (rather than deletes) the single ddns_config row.
fn reset<T>(db: &T) -> Result<()>
	where T: ConnectionSource
{
	use self::ddns_config::dsl::*;
	let connection = db.get_connection();

	diesel::delete(mount_points::table)
		.execute(connection.deref())?;

	diesel::delete(users::table).execute(connection.deref())?;

	// ddns_config is a fixed single-row table, so clear its fields in place.
	diesel::update(ddns_config)
		.set((host.eq(""), username.eq(""), password.eq("")))
		.execute(connection.deref())?;

	Ok(())
}
/// Replaces the whole persisted configuration: clears mount points, users and
/// DDNS settings, then applies `new_config` onto the blank slate.
pub fn overwrite<T>(db: &T, new_config: &Config) -> Result<()>
	where T: ConnectionSource
{
	reset(db)?;
	amend(db, new_config)?;
	Ok(())
}
/// Applies a partial configuration: only the `Some(...)` fields of
/// `new_config` are written; `None` fields leave the database untouched.
/// Users with an empty password keep their existing password hash.
pub fn amend<T>(db: &T, new_config: &Config) -> Result<()>
	where T: ConnectionSource
{
	let connection = db.get_connection();

	if let Some(ref mount_dirs) = new_config.mount_dirs {
		// Mount points are replaced wholesale.
		diesel::delete(mount_points::table)
			.execute(connection.deref())?;
		diesel::insert(mount_dirs)
			.into(mount_points::table)
			.execute(connection.deref())?;
	}

	if let Some(ref config_users) = new_config.users {
		let old_usernames: Vec<String> = users::table
			.select(users::name)
			.get_results(connection.deref())?;

		// Delete users that are not in new list
		// Delete users that have a new password
		let delete_usernames: Vec<String> = old_usernames
			.into_iter()
			.filter(|old_name| match config_users.iter().find(|u| &u.name == old_name) {
				None => true,
				Some(new_user) => !new_user.password.is_empty(),
			})
			.collect::<_>();
		diesel::delete(users::table.filter(users::name.eq_any(&delete_usernames)))
			.execute(connection.deref())?;

		// Insert users that have a new password
		// (users with a password were just deleted above, so this re-creates
		// them with a freshly computed hash).
		let insert_users: Vec<&ConfigUser> = config_users
			.iter()
			.filter(|u| !u.password.is_empty())
			.collect::<_>();
		for ref config_user in insert_users {
			let new_user = User::new(&config_user.name, &config_user.password, config_user.admin);
			diesel::insert(&new_user)
				.into(users::table)
				.execute(connection.deref())?;
		}

		// Grant admin rights
		// (applied to every listed user, including ones whose password and
		// row were preserved).
		for ref user in config_users {
			diesel::update(users::table.filter(users::name.eq(&user.name)))
				.set(users::admin.eq(user.admin as i32))
				.execute(connection.deref())?;
		}
	}

	if let Some(sleep_duration) = new_config.reindex_every_n_seconds {
		diesel::update(misc_settings::table)
			.set(misc_settings::index_sleep_duration_seconds.eq(sleep_duration as i32))
			.execute(connection.deref())?;
	}

	if let Some(ref album_art_pattern) = new_config.album_art_pattern {
		diesel::update(misc_settings::table)
			.set(misc_settings::index_album_art_pattern.eq(album_art_pattern))
			.execute(connection.deref())?;
	}

	if let Some(ref ydns) = new_config.ydns {
		use self::ddns_config::dsl::*;
		diesel::update(ddns_config)
			.set((host.eq(ydns.host.clone()),
			      username.eq(ydns.username.clone()),
			      password.eq(ydns.password.clone())))
			.execute(connection.deref())?;
	}

	Ok(())
}
/// Normalizes a path string by converting both `\` and `/` separators to the
/// platform's native separator, then rebuilding the path component by
/// component (which also collapses duplicate and trailing separators).
///
/// The previous implementation compiled a `Regex` on every call just to swap
/// separator characters; a plain character substitution does the same job
/// with no per-call compilation and no regex dependency.
fn clean_path_string(path_string: &str) -> path::PathBuf {
	let normalized: String = path_string
		.chars()
		.map(|c| if c == '\\' || c == '/' { path::MAIN_SEPARATOR } else { c })
		.collect();
	path::Path::new(&normalized).iter().collect()
}
// Creates a fresh, empty test database at test/<name>, removing any stale
// file left over from a previous run.
fn _get_test_db(name: &str) -> DB {
	let db_path: path::PathBuf = ["test", name].iter().collect();
	if db_path.exists() {
		fs::remove_file(&db_path).unwrap();
	}
	DB::new(&db_path).unwrap()
}
// Verifies that `amend` overwrites provided fields and preserves fields that
// are `None` in the new config (here: reindex_every_n_seconds).
#[test]
fn test_amend() {
	let db = _get_test_db("amend.sqlite");

	let initial_config = Config {
		album_art_pattern: Some("file\\.png".into()),
		reindex_every_n_seconds: Some(123),
		mount_dirs: Some(vec![MountPoint {
			source: "C:\\Music".into(),
			name: "root".into(),
		}]),
		users: Some(vec![ConfigUser {
			name: "Teddy🐻".into(),
			password: "Tasty🍖".into(),
			admin: false,
		}]),
		ydns: None,
	};

	let new_config = Config {
		album_art_pattern: Some("🖼️\\.jpg".into()),
		reindex_every_n_seconds: None,
		mount_dirs: Some(vec![MountPoint {
			source: "/home/music".into(),
			name: "🎵📁".into(),
		}]),
		users: Some(vec![ConfigUser {
			name: "Kermit🐸".into(),
			password: "🐞🐞".into(),
			admin: false,
		}]),
		ydns: Some(DDNSConfig {
			host: "🐸🐸🐸.ydns.eu".into(),
			username: "kfr🐸g".into(),
			password: "tasty🐞".into(),
		}),
	};

	// Reading the config back should show the new values, except:
	// - the sleep duration (None in new_config) keeps its initial value,
	// - passwords are never read back (empty string).
	let mut expected_config = new_config.clone();
	expected_config.reindex_every_n_seconds = initial_config.reindex_every_n_seconds;
	if let Some(ref mut users) = expected_config.users {
		users[0].password = "".into();
	}

	amend(&db, &initial_config).unwrap();
	amend(&db, &new_config).unwrap();
	let db_config = read(&db).unwrap();
	assert_eq!(db_config, expected_config);
}
// Verifies that amending with an empty password keeps the user's existing
// password hash instead of re-hashing or deleting the account.
#[test]
fn test_amend_preserve_password_hashes() {
	use self::users::dsl::*;

	let db = _get_test_db("amend_preserve_password_hashes.sqlite");
	let initial_hash: Vec<u8>;
	let new_hash: Vec<u8>;

	// Create Teddy with a real password.
	let initial_config = Config {
		album_art_pattern: None,
		reindex_every_n_seconds: None,
		mount_dirs: None,
		users: Some(vec![ConfigUser {
			name: "Teddy🐻".into(),
			password: "Tasty🍖".into(),
			admin: false,
		}]),
		ydns: None,
	};
	amend(&db, &initial_config).unwrap();

	{
		let connection = db.get_connection();
		initial_hash = users
			.select(password_hash)
			.filter(name.eq("Teddy🐻"))
			.get_result(connection.deref())
			.unwrap();
	}

	// Re-list Teddy with an empty password (meaning "keep password") while
	// also adding a brand new user.
	let new_config = Config {
		album_art_pattern: None,
		reindex_every_n_seconds: None,
		mount_dirs: None,
		users: Some(vec![ConfigUser {
			name: "Kermit🐸".into(),
			password: "tasty🐞".into(),
			admin: false,
		},
		ConfigUser {
			name: "Teddy🐻".into(),
			password: "".into(),
			admin: false,
		}]),
		ydns: None,
	};
	amend(&db, &new_config).unwrap();

	{
		let connection = db.get_connection();
		new_hash = users
			.select(password_hash)
			.filter(name.eq("Teddy🐻"))
			.get_result(connection.deref())
			.unwrap();
	}

	assert_eq!(new_hash, initial_hash);
}
// Verifies that `amend` updates the admin flag for an existing user
// (1 -> 0 in the stored integer column) without requiring a password change.
#[test]
fn test_toggle_admin() {
	use self::users::dsl::*;

	let db = _get_test_db("amend_toggle_admin.sqlite");

	// Create Teddy as an admin.
	let initial_config = Config {
		album_art_pattern: None,
		reindex_every_n_seconds: None,
		mount_dirs: None,
		users: Some(vec![ConfigUser {
			name: "Teddy🐻".into(),
			password: "Tasty🍖".into(),
			admin: true,
		}]),
		ydns: None,
	};
	amend(&db, &initial_config).unwrap();

	{
		let connection = db.get_connection();
		let is_admin: i32 = users
			.select(admin)
			.get_result(connection.deref())
			.unwrap();
		assert_eq!(is_admin, 1);
	}

	// Demote Teddy; empty password keeps the account in place.
	let new_config = Config {
		album_art_pattern: None,
		reindex_every_n_seconds: None,
		mount_dirs: None,
		users: Some(vec![ConfigUser {
			name: "Teddy🐻".into(),
			password: "".into(),
			admin: false,
		}]),
		ydns: None,
	};
	amend(&db, &new_config).unwrap();

	{
		let connection = db.get_connection();
		let is_admin: i32 = users
			.select(admin)
			.get_result(connection.deref())
			.unwrap();
		assert_eq!(is_admin, 0);
	}
}
// Checks that clean_path_string normalizes mixed, duplicated and trailing
// separators down to the platform-native canonical path.
#[test]
fn test_clean_path_string() {
	// Expected result: the native form of (<root>)/some/path.
	let mut correct_path = path::PathBuf::new();
	correct_path.push(if cfg!(target_os = "windows") { "C:\\" } else { "/usr" });
	correct_path.push("some");
	correct_path.push("path");

	let inputs: Vec<&str> = if cfg!(target_os = "windows") {
		vec![r#"C:/some/path"#,
		     r#"C:\some\path"#,
		     r#"C:\some\path\"#,
		     r#"C:\some\path\\\\"#,
		     r#"C:\some/path//"#]
	} else {
		vec![r#"/usr/some/path"#,
		     r#"/usr\some\path"#,
		     r#"/usr\some\path\"#,
		     r#"/usr\some\path\\\\"#,
		     r#"/usr\some/path//"#]
	};

	for input in inputs {
		assert_eq!(correct_path, clean_path_string(input));
	}
}

View file

@ -1,2 +0,0 @@
-- Revert the initial collection-index schema.
DROP TABLE directories;
DROP TABLE songs;

View file

@ -1,25 +0,0 @@
-- Collection index: one row per scanned directory. Re-scanning the same path
-- replaces the existing row (ON CONFLICT REPLACE).
CREATE TABLE directories (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT, -- NULL for top-level mount points
	artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT, -- path to the album-art file, if any
	UNIQUE(path) ON CONFLICT REPLACE
);

-- Collection index: one row per audio file, keyed by its parent directory.
CREATE TABLE songs (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT NOT NULL,
	track_number INTEGER,
	disc_number INTEGER,
	title TEXT,
	artist TEXT,
	album_artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT,
	UNIQUE(path) ON CONFLICT REPLACE
);

View file

@ -1,15 +0,0 @@
-- Revert the date_added column. SQLite has no DROP COLUMN, so copy the data
-- aside, rebuild the table without the column, and copy the data back.
CREATE TEMPORARY TABLE directories_backup(id, path, parent, artist, year, album, artwork);
INSERT INTO directories_backup SELECT id, path, parent, artist, year, album, artwork FROM directories;
DROP TABLE directories;
CREATE TABLE directories (
	id INTEGER PRIMARY KEY NOT NULL,
	path TEXT NOT NULL,
	parent TEXT,
	artist TEXT,
	year INTEGER,
	album TEXT,
	artwork TEXT,
	UNIQUE(path) ON CONFLICT REPLACE
);
INSERT INTO directories SELECT * FROM directories_backup;
DROP TABLE directories_backup;

View file

@ -1 +0,0 @@
-- Track when a directory first entered the index (unix timestamp, seconds).
ALTER TABLE directories ADD COLUMN date_added INTEGER DEFAULT 0 NOT NULL;

View file

@ -1 +0,0 @@
-- Revert the users table migration.
DROP TABLE users;

View file

@ -1,8 +0,0 @@
-- User accounts. Passwords are stored as salt + hash, never plaintext.
CREATE TABLE users (
	id INTEGER PRIMARY KEY NOT NULL,
	name TEXT NOT NULL,
	password_salt BLOB NOT NULL,
	password_hash BLOB NOT NULL,
	admin INTEGER NOT NULL, -- 0/1 boolean
	UNIQUE(name)
);

View file

@ -1 +0,0 @@
-- Revert the misc_settings table migration.
DROP TABLE misc_settings;

View file

@ -1,7 +0,0 @@
-- Single-row settings table (id forced to 0 by the CHECK constraint),
-- seeded with a random auth secret and default indexing settings.
CREATE TABLE misc_settings (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	auth_secret TEXT NOT NULL,
	index_sleep_duration_seconds INTEGER NOT NULL,
	index_album_art_pattern TEXT NOT NULL
);
INSERT INTO misc_settings (id, auth_secret, index_sleep_duration_seconds, index_album_art_pattern) VALUES (0, hex(randomblob(64)), 1800, "Folder.(jpg|png)");

View file

@ -1 +0,0 @@
-- Revert the ddns_config table migration.
DROP TABLE ddns_config;

View file

@ -1,8 +0,0 @@
-- Single-row dynamic DNS credential store (id forced to 0), seeded with
-- empty credentials (which disables DDNS updates).
CREATE TABLE ddns_config (
	id INTEGER PRIMARY KEY NOT NULL CHECK(id = 0),
	host TEXT NOT NULL,
	username TEXT NOT NULL,
	password TEXT NOT NULL
);
INSERT INTO ddns_config (id, host, username, password) VALUES (0, "", "", "");

View file

@ -1 +0,0 @@
-- Revert the mount_points table migration.
DROP TABLE mount_points;

View file

@ -1,6 +0,0 @@
-- Maps a real filesystem directory (source) to a virtual name exposed to clients.
CREATE TABLE mount_points (
	id INTEGER PRIMARY KEY NOT NULL,
	source TEXT NOT NULL,
	name TEXT NOT NULL,
	UNIQUE(name)
);

View file

@ -1,2 +0,0 @@
-- Revert the playlists migration.
DROP TABLE playlists;
DROP TABLE playlist_songs;

View file

@ -1,16 +0,0 @@
-- Playlists are owned per-user; deleting a user cascades to their playlists.
-- Saving a playlist with an existing (owner, name) replaces it.
CREATE TABLE playlists (
	id INTEGER PRIMARY KEY NOT NULL,
	owner INTEGER NOT NULL,
	name TEXT NOT NULL,
	FOREIGN KEY(owner) REFERENCES users(id) ON DELETE CASCADE,
	UNIQUE(owner, name) ON CONFLICT REPLACE
);

-- Playlist entries reference songs by path and keep an explicit ordering;
-- deleting a playlist cascades to its entries.
CREATE TABLE playlist_songs (
	id INTEGER PRIMARY KEY NOT NULL,
	playlist INTEGER NOT NULL,
	path TEXT NOT NULL,
	ordering INTEGER NOT NULL,
	FOREIGN KEY(playlist) REFERENCES playlists(id) ON DELETE CASCADE ON UPDATE CASCADE,
	UNIQUE(playlist, ordering) ON CONFLICT REPLACE
);

View file

@ -1,106 +0,0 @@
use core::ops::Deref;
use diesel;
use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, MutexGuard};
use errors::*;
mod schema;
pub use self::schema::*;
// On-disk location of the migration scripts; only needed by migrate_down.
#[allow(dead_code)]
const DB_MIGRATIONS_PATH: &'static str = "src/db/migrations";
// Compile all migrations into the binary so migrate_up needs no files at runtime.
embed_migrations!("src/db/migrations");
// Abstraction over "something that can hand out the SQLite connection",
// letting code run against either the real DB or a test database.
pub trait ConnectionSource {
	// Locks and returns the shared connection for immediate use.
	fn get_connection(&self) -> MutexGuard<SqliteConnection>;
	// Returns the connection's mutex itself, for long-lived holders.
	fn get_connection_mutex(&self) -> Arc<Mutex<SqliteConnection>>;
}
// Handle to the application's SQLite database: a single connection shared
// behind a mutex.
pub struct DB {
	connection: Arc<Mutex<SqliteConnection>>,
}
impl DB {
	/// Opens (creating if needed) the SQLite database at `path` and runs all
	/// pending migrations.
	pub fn new(path: &Path) -> Result<DB> {
		println!("Database file path: {}", path.to_string_lossy());
		let connection =
			Arc::new(Mutex::new(SqliteConnection::establish(&path.to_string_lossy())?));
		let db = DB { connection: connection.clone() };
		db.init()?;
		Ok(db)
	}

	// One-time connection setup + schema migration.
	fn init(&self) -> Result<()> {
		{
			// Scope the lock: migrate_up re-locks the same mutex below.
			let connection = self.connection.lock().unwrap();
			connection.execute("PRAGMA synchronous = NORMAL")?;
		}
		self.migrate_up()?;
		Ok(())
	}

	// Reverts every applied migration (test helper). Loops until diesel
	// reports that no migration is left to revert.
	#[allow(dead_code)]
	fn migrate_down(&self) -> Result<()> {
		let connection = self.connection.lock().unwrap();
		let connection = connection.deref();
		loop {
			match diesel::migrations::revert_latest_migration_in_directory(connection, Path::new(DB_MIGRATIONS_PATH)) {
				Ok(_) => (),
				Err(diesel::migrations::RunMigrationsError::MigrationError(diesel::migrations::MigrationError::NoMigrationRun)) => break,
				Err(e) => bail!(e),
			}
		}
		Ok(())
	}

	// Applies all pending migrations using the embedded migration scripts.
	fn migrate_up(&self) -> Result<()> {
		let connection = self.connection.lock().unwrap();
		let connection = connection.deref();
		embedded_migrations::run(connection)?;
		Ok(())
	}
}
// DB is the canonical ConnectionSource: both accessors expose the same
// shared, mutex-guarded SQLite connection.
impl ConnectionSource for DB {
	fn get_connection(&self) -> MutexGuard<SqliteConnection> {
		self.connection.lock().unwrap()
	}

	fn get_connection_mutex(&self) -> Arc<Mutex<SqliteConnection>> {
		self.connection.clone()
	}
}
// Builds a fresh test database at test/<name>, seeded with the configuration
// from test/config.toml. Any stale database file is removed first.
pub fn _get_test_db(name: &str) -> DB {
	use config;
	let config = config::parse_toml_file(Path::new("test/config.toml")).unwrap();

	let mut db_path = PathBuf::from("test");
	db_path.push(name);
	if db_path.exists() {
		fs::remove_file(&db_path).unwrap();
	}

	let db = DB::new(&db_path).unwrap();
	config::overwrite(&db, &config).unwrap();
	db
}
// Smoke test: creating a test DB runs every migration forward.
#[test]
fn test_migrations_up() {
	_get_test_db("migrations_up.sqlite");
}
// Smoke test: migrations can be fully reverted and re-applied.
#[test]
fn test_migrations_down() {
	let db = _get_test_db("migrations_down.sqlite");
	db.migrate_down().unwrap();
	db.migrate_up().unwrap();
}

View file

@ -1 +0,0 @@
// diesel 0.x codegen: generates table modules at compile time from the
// reference SQLite database checked in at this path.
infer_schema!("src/db/schema.sqlite");

View file

@ -1,98 +0,0 @@
use core::ops::Deref;
use diesel::prelude::*;
use reqwest;
use reqwest::header::{Authorization, Basic};
use std::io;
use std::thread;
use std::time;
use db::{ConnectionSource, DB};
use db::ddns_config;
use errors;
// Credentials for the dynamic DNS update endpoint; maps to the single-row
// ddns_config table and is also (de)serialized in the user-facing config.
#[derive(Clone, Debug, Deserialize, Insertable, PartialEq, Queryable, Serialize)]
#[table_name="ddns_config"]
pub struct DDNSConfig {
	pub host: String, // hostname to keep pointed at our IP; empty disables updates
	pub username: String,
	pub password: String,
}
// Abstraction over "where the DDNS credentials come from" (the DB in
// production; mockable in tests).
pub trait DDNSConfigSource {
	fn get_ddns_config(&self) -> errors::Result<DDNSConfig>;
}
// Reads the DDNS credentials from the single-row ddns_config table.
impl DDNSConfigSource for DB {
	fn get_ddns_config(&self) -> errors::Result<DDNSConfig> {
		use self::ddns_config::dsl::*;
		let connection = self.get_connection();
		Ok(ddns_config
		       .select((host, username, password))
		       .get_result(connection.deref())?)
	}
}
// Everything that can go wrong during one DDNS update attempt.
#[derive(Debug)]
enum DDNSError {
	InternalError(errors::Error), // failure reading config from the DB
	IoError(io::Error),
	ReqwestError(reqwest::Error), // HTTP transport failure
	UpdateError(reqwest::StatusCode), // server replied with a non-success status
}
// From conversions so `?` can lift each underlying error into DDNSError.
impl From<io::Error> for DDNSError {
	fn from(err: io::Error) -> DDNSError {
		DDNSError::IoError(err)
	}
}

impl From<errors::Error> for DDNSError {
	fn from(err: errors::Error) -> DDNSError {
		DDNSError::InternalError(err)
	}
}

impl From<reqwest::Error> for DDNSError {
	fn from(err: reqwest::Error) -> DDNSError {
		DDNSError::ReqwestError(err)
	}
}
// YDNS update endpoint; the host to refresh is appended as a query parameter.
const DDNS_UPDATE_URL: &'static str = "https://ydns.io/api/v1/update/";
/// Performs one dynamic DNS update for the configured host, authenticating
/// with HTTP basic auth. A missing host or username is treated as "DDNS not
/// configured" and succeeds without sending anything.
fn update_my_ip<T>(config_source: &T) -> Result<(), DDNSError>
	where T: DDNSConfigSource
{
	let config = config_source.get_ddns_config()?;
	// Idiomatic emptiness checks (was `len() == 0`).
	if config.host.is_empty() || config.username.is_empty() {
		println!("Skipping DDNS update because credentials are missing");
		return Ok(());
	}

	// YDNS expects the hostname as a query parameter; the server infers the
	// new IP from the request's source address.
	let full_url = format!("{}?host={}", DDNS_UPDATE_URL, &config.host);
	let auth_header = Authorization(Basic {
		username: config.username.clone(),
		password: Some(config.password.clone()),
	});
	let client = reqwest::Client::new()?;
	let res = client
		.get(full_url.as_str())
		.header(auth_header)
		.send()?;
	if !res.status().is_success() {
		return Err(DDNSError::UpdateError(*res.status()));
	}
	Ok(())
}
/// Background worker: attempts a DDNS update every 30 minutes, forever.
/// Failures are logged and do not stop the loop.
pub fn run<T>(config_source: &T)
	where T: DDNSConfigSource
{
	loop {
		if let Err(e) = update_my_ip(config_source) {
			println!("Dynamic DNS update error: {:?}", e);
		}
		thread::sleep(time::Duration::from_secs(60 * 30));
	}
}

View file

@ -1,72 +0,0 @@
use ape;
use core;
use diesel;
use id3;
use getopts;
use image;
use hyper;
use iron::IronError;
use iron::status::Status;
use lewton;
use metaflac;
use regex;
use serde_json;
use std;
use toml;
// error_chain 0.x setup: `foreign_links` auto-converts each listed library
// error into our `Error` type (enabling `?` everywhere); `errors` declares
// the application's own error kinds.
error_chain! {
	foreign_links {
		Ape(ape::Error);
		Diesel(diesel::result::Error);
		DieselConnection(diesel::ConnectionError);
		DieselMigration(diesel::migrations::RunMigrationsError);
		Encoding(core::str::Utf8Error);
		Flac(metaflac::Error);
		GetOpts(getopts::Fail);
		Hyper(hyper::Error);
		Id3(id3::Error);
		Image(image::ImageError);
		Io(std::io::Error);
		Json(serde_json::Error);
		Time(std::time::SystemTimeError);
		Toml(toml::de::Error);
		Regex(regex::Error);
		Vorbis(lewton::VorbisError);
	}

	errors {
		DaemonError {}
		AuthenticationRequired {}
		AdminPrivilegeRequired {}
		MissingConfig {}
		MissingUsername {}
		MissingPassword {}
		MissingPlaylist {}
		IncorrectCredentials {}
		CannotServeDirectory {}
		UnsupportedFileType {}
		FileNotFound {}
		MissingIndexVersion {}
		MissingPlaylistName {}
		EncodingError {}
	}
}
// Maps application error kinds to HTTP statuses for the Iron web layer:
// auth failures -> 401, privilege failures -> 403, malformed requests -> 400,
// anything unrecognized -> 500.
impl From<Error> for IronError {
	fn from(err: Error) -> IronError {
		match err {
			e @ Error(ErrorKind::AuthenticationRequired, _) => {
				IronError::new(e, Status::Unauthorized)
			}
			e @ Error(ErrorKind::AdminPrivilegeRequired, _) => IronError::new(e, Status::Forbidden),
			e @ Error(ErrorKind::MissingUsername, _) => IronError::new(e, Status::BadRequest),
			e @ Error(ErrorKind::MissingPassword, _) => IronError::new(e, Status::BadRequest),
			e @ Error(ErrorKind::IncorrectCredentials, _) => {
				IronError::new(e, Status::Unauthorized)
			}
			e @ Error(ErrorKind::CannotServeDirectory, _) => IronError::new(e, Status::BadRequest),
			e @ Error(ErrorKind::UnsupportedFileType, _) => IronError::new(e, Status::BadRequest),
			e => IronError::new(e, Status::InternalServerError),
		}
	}
}

View file

@ -1,694 +0,0 @@
use core::ops::Deref;
use diesel;
use diesel::expression::sql;
use diesel::prelude::*;
use diesel::sqlite::SqliteConnection;
use diesel::types;
use regex::Regex;
use std::fs;
use std::path::Path;
#[cfg(test)]
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::*;
use std::thread;
use std::time;
use config::MiscSettings;
#[cfg(test)]
use db;
use db::ConnectionSource;
use db::{directories, misc_settings, songs};
use vfs::{VFS, VFSSource};
use errors::*;
use metadata;
const INDEX_BUILDING_INSERT_BUFFER_SIZE: usize = 1000; // Insertions in each transaction
const INDEX_BUILDING_CLEAN_BUFFER_SIZE: usize = 500; // Deletions per batch when pruning missing files

// Exposes SQLite's RANDOM() to the diesel query builder (used by get_random_albums).
no_arg_sql_function!(random,
                     types::Integer,
                     "Represents the SQL RANDOM() function");
// Messages accepted by the index worker loop.
pub enum Command {
	REINDEX, // request a full rescan of the collection
}
// A song row from the index, as served to API clients.
// Field order must match the songs table for diesel's Queryable.
#[derive(Debug, Queryable, Serialize)]
pub struct Song {
	#[serde(skip_serializing)]
	id: i32,
	pub path: String, // real path in the DB; rewritten to a virtual path before serving
	#[serde(skip_serializing)]
	pub parent: String, // containing directory (real path)
	pub track_number: Option<i32>,
	pub disc_number: Option<i32>,
	pub title: Option<String>,
	pub artist: Option<String>,
	pub album_artist: Option<String>,
	pub year: Option<i32>,
	pub album: Option<String>,
	pub artwork: Option<String>, // path to album art, if any
}
// A directory row from the index, as served to API clients.
// Field order must match the directories table for diesel's Queryable.
#[derive(Debug, Queryable, Serialize)]
pub struct Directory {
	#[serde(skip_serializing)]
	id: i32,
	pub path: String, // real path in the DB; rewritten to a virtual path before serving
	#[serde(skip_serializing)]
	pub parent: Option<String>, // None for top-level mount points
	pub artist: Option<String>, // folder-level metadata inferred from contained songs
	pub year: Option<i32>,
	pub album: Option<String>,
	pub artwork: Option<String>,
	pub date_added: i32, // unix timestamp (seconds) of first indexing
}
// A browse result entry: either a sub-directory or a song.
#[derive(Debug, Serialize)]
pub enum CollectionFile {
	Directory(Directory),
	Song(Song),
}
// Insert-side counterpart of Song (no id; the DB assigns it).
#[derive(Debug, Insertable)]
#[table_name="songs"]
struct NewSong {
	path: String,
	parent: String,
	track_number: Option<i32>,
	disc_number: Option<i32>,
	title: Option<String>,
	artist: Option<String>,
	album_artist: Option<String>,
	year: Option<i32>,
	album: Option<String>,
	artwork: Option<String>,
}
// Insert-side counterpart of Directory (no id; the DB assigns it).
#[derive(Debug, Insertable)]
#[table_name="directories"]
struct NewDirectory {
	path: String,
	parent: Option<String>,
	artist: Option<String>,
	year: Option<i32>,
	album: Option<String>,
	artwork: Option<String>,
	date_added: i32,
}
// Accumulates rows during a filesystem scan and flushes them to the database
// in batched transactions (see INDEX_BUILDING_INSERT_BUFFER_SIZE).
struct IndexBuilder<'conn> {
	new_songs: Vec<NewSong>, // pending song rows awaiting a flush
	new_directories: Vec<NewDirectory>, // pending directory rows awaiting a flush
	connection: &'conn Mutex<SqliteConnection>,
	album_art_pattern: Regex, // filename pattern identifying album art
}
impl<'conn> IndexBuilder<'conn> {
	// Creates a builder with both insert buffers pre-sized to the batch size,
	// so `len() >= capacity()` in push_* acts as the flush threshold.
	fn new(connection: &Mutex<SqliteConnection>, album_art_pattern: Regex) -> Result<IndexBuilder> {
		let mut new_songs = Vec::new();
		let mut new_directories = Vec::new();
		new_songs.reserve_exact(INDEX_BUILDING_INSERT_BUFFER_SIZE);
		new_directories.reserve_exact(INDEX_BUILDING_INSERT_BUFFER_SIZE);
		Ok(IndexBuilder {
		       new_songs: new_songs,
		       new_directories: new_directories,
		       connection: connection,
		       album_art_pattern: album_art_pattern,
		   })
	}

	// Writes all buffered songs in one transaction, then clears the buffer.
	fn flush_songs(&mut self) -> Result<()> {
		let connection = self.connection.lock().unwrap();
		let connection = connection.deref();
		connection
			.transaction::<_, Error, _>(|| {
				diesel::insert(&self.new_songs)
					.into(songs::table)
					.execute(connection)?;
				Ok(())
			})?;
		self.new_songs.clear();
		Ok(())
	}

	// Writes all buffered directories in one transaction, then clears the buffer.
	fn flush_directories(&mut self) -> Result<()> {
		let connection = self.connection.lock().unwrap();
		let connection = connection.deref();
		connection
			.transaction::<_, Error, _>(|| {
				diesel::insert(&self.new_directories)
					.into(directories::table)
					.execute(connection)?;
				Ok(())
			})?;
		self.new_directories.clear();
		Ok(())
	}

	// Buffers a song row, flushing first if the batch is full.
	fn push_song(&mut self, song: NewSong) -> Result<()> {
		if self.new_songs.len() >= self.new_songs.capacity() {
			self.flush_songs()?;
		}
		self.new_songs.push(song);
		Ok(())
	}

	// Buffers a directory row, flushing first if the batch is full.
	fn push_directory(&mut self, directory: NewDirectory) -> Result<()> {
		if self.new_directories.len() >= self.new_directories.capacity() {
			self.flush_directories()?;
		}
		self.new_directories.push(directory);
		Ok(())
	}

	// Returns the path of the first file in `dir` whose name matches the
	// album-art pattern, or None.
	fn get_artwork(&self, dir: &Path) -> Result<Option<String>> {
		for file in fs::read_dir(dir)? {
			let file = file?;
			if let Some(name_string) = file.file_name().to_str() {
				if self.album_art_pattern.is_match(name_string) {
					return Ok(file.path().to_str().map(|p| p.to_owned()));
				}
			}
		}
		Ok(None)
	}

	// Recursively indexes `path`: buffers one row per audio file, derives
	// folder-level album/artist/year (cleared when songs disagree), then
	// recurses into sub-directories.
	fn populate_directory(&mut self, parent: Option<&Path>, path: &Path) -> Result<()> {

		// Find artwork
		let artwork = self.get_artwork(path).unwrap_or(None);

		// Extract path and parent path
		let parent_string = parent.and_then(|p| p.to_str()).map(|s| s.to_owned());
		let path_string = path.to_str().ok_or("Invalid directory path")?;

		// Find date added (fall back to mtime where creation time is unavailable)
		let metadata = fs::metadata(path_string)?;
		let created = metadata
			.created()
			.or(metadata.modified())?
			.duration_since(time::UNIX_EPOCH)?
			.as_secs() as i32;

		// Folder-level tags, invalidated when two songs disagree.
		let mut directory_album = None;
		let mut directory_year = None;
		let mut directory_artist = None;
		let mut inconsistent_directory_album = false;
		let mut inconsistent_directory_year = false;
		let mut inconsistent_directory_artist = false;

		// Sub directories
		let mut sub_directories = Vec::new();

		// Insert content
		for file in fs::read_dir(path)? {
			let file_path = match file {
				Ok(f) => f.path(),
				_ => {
					println!("File read error within {}", path_string);
					break;
				}
			};

			if file_path.is_dir() {
				sub_directories.push(file_path.to_path_buf());
				continue;
			}

			if let Some(file_path_string) = file_path.to_str() {
				// Files whose tags cannot be read are silently skipped.
				if let Ok(tags) = metadata::read(file_path.as_path()) {
					if tags.year.is_some() {
						inconsistent_directory_year |= directory_year.is_some() &&
						                               directory_year != tags.year;
						directory_year = tags.year;
					}

					if tags.album.is_some() {
						inconsistent_directory_album |= directory_album.is_some() &&
						                                directory_album != tags.album;
						directory_album = tags.album.as_ref().map(|a| a.clone());
					}

					// Prefer album_artist over artist for the folder-level artist.
					if tags.album_artist.is_some() {
						inconsistent_directory_artist |= directory_artist.is_some() &&
						                                 directory_artist != tags.album_artist;
						directory_artist = tags.album_artist.as_ref().map(|a| a.clone());

					} else if tags.artist.is_some() {
						inconsistent_directory_artist |= directory_artist.is_some() &&
						                                 directory_artist != tags.artist;
						directory_artist = tags.artist.as_ref().map(|a| a.clone());
					}

					let song = NewSong {
						path: file_path_string.to_owned(),
						parent: path_string.to_owned(),
						disc_number: tags.disc_number.map(|n| n as i32),
						track_number: tags.track_number.map(|n| n as i32),
						title: tags.title,
						artist: tags.artist,
						album_artist: tags.album_artist,
						album: tags.album,
						year: tags.year,
						artwork: artwork.as_ref().map(|s| s.to_owned()),
					};

					self.push_song(song)?;
				}
			}
		}

		// Insert directory
		if inconsistent_directory_year {
			directory_year = None;
		}
		if inconsistent_directory_album {
			directory_album = None;
		}
		if inconsistent_directory_artist {
			directory_artist = None;
		}

		let directory = NewDirectory {
			path: path_string.to_owned(),
			parent: parent_string,
			artwork: artwork,
			album: directory_album,
			artist: directory_artist,
			year: directory_year,
			date_added: created,
		};
		self.push_directory(directory)?;

		// Populate subdirectories
		for sub_directory in sub_directories {
			self.populate_directory(Some(path), &sub_directory)?;
		}

		Ok(())
	}
}
/// Prunes index rows whose files no longer exist on disk or are no longer
/// reachable through any mount point. Deletions are batched
/// (INDEX_BUILDING_CLEAN_BUFFER_SIZE) and the connection lock is released
/// between the read and delete phases.
fn clean<T>(db: &T) -> Result<()>
	where T: ConnectionSource + VFSSource
{
	let vfs = db.get_vfs()?;

	{
		// Songs: read all paths, keep only the missing/unmounted ones.
		let all_songs: Vec<String>;
		{
			let connection = db.get_connection();
			all_songs = songs::table
				.select(songs::path)
				.load(connection.deref())?;
		}

		let missing_songs = all_songs
			.into_iter()
			.filter(|ref song_path| {
				        let path = Path::new(&song_path);
				        !path.exists() || vfs.real_to_virtual(path).is_err()
				       })
			.collect::<Vec<_>>();

		{
			let connection = db.get_connection();
			for chunk in missing_songs[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
				diesel::delete(songs::table.filter(songs::path.eq_any(chunk)))
					.execute(connection.deref())?;
			}
		}
	}

	{
		// Directories: same treatment as songs.
		let all_directories: Vec<String>;
		{
			let connection = db.get_connection();
			all_directories = directories::table
				.select(directories::path)
				.load(connection.deref())?;
		}

		let missing_directories = all_directories
			.into_iter()
			.filter(|ref directory_path| {
				        let path = Path::new(&directory_path);
				        !path.exists() || vfs.real_to_virtual(path).is_err()
				       })
			.collect::<Vec<_>>();

		{
			let connection = db.get_connection();
			for chunk in missing_directories[..].chunks(INDEX_BUILDING_CLEAN_BUFFER_SIZE) {
				diesel::delete(directories::table.filter(directories::path.eq_any(chunk)))
					.execute(connection.deref())?;
			}
		}
	}

	Ok(())
}
/// Walks every mount point and (re)inserts its directories and songs into
/// the index, using an IndexBuilder for batched writes.
fn populate<T>(db: &T) -> Result<()>
	where T: ConnectionSource + VFSSource
{
	let vfs = db.get_vfs()?;
	let mount_points = vfs.get_mount_points();

	// Compile the album-art filename pattern from settings before scanning.
	let album_art_pattern;
	{
		let connection = db.get_connection();
		let settings: MiscSettings = misc_settings::table.get_result(connection.deref())?;
		album_art_pattern = Regex::new(&settings.index_album_art_pattern)?;
	}

	let connection_mutex = db.get_connection_mutex();
	let mut builder = IndexBuilder::new(connection_mutex.deref(), album_art_pattern)?;
	for (_, target) in mount_points {
		builder.populate_directory(None, target.as_path())?;
	}
	// Flush whatever is left in the partial batches.
	builder.flush_songs()?;
	builder.flush_directories()?;
	Ok(())
}
/// Runs one full index update: prune rows for files that disappeared, then
/// rescan every mount point. Logs the total wall-clock duration.
pub fn update<T>(db: &T) -> Result<()>
	where T: ConnectionSource + VFSSource
{
	let started_at = time::Instant::now();
	println!("Beginning library index update");

	clean(db)?;
	populate(db)?;

	println!("Library index update took {} seconds",
	         started_at.elapsed().as_secs());
	Ok(())
}
/// Index worker loop: blocks until a command arrives, coalesces any queued
/// duplicates, then runs one update. Returns only when the channel closes.
pub fn update_loop<T>(db: &T, command_buffer: Receiver<Command>)
	where T: ConnectionSource + VFSSource
{
	loop {
		// Wait for a command
		if let Err(e) = command_buffer.recv() {
			println!("Error while waiting on index command buffer: {}", e);
			return;
		}

		// Flush the buffer to ignore spammy requests
		loop {
			match command_buffer.try_recv() {
				Err(TryRecvError::Disconnected) => {
					println!("Error while flushing index command buffer");
					return;
				}
				Err(TryRecvError::Empty) => break,
				Ok(_) => (),
			}
		}

		// Do the update
		if let Err(e) = update(db) {
			println!("Error while updating index: {}", e);
		}
	}
}
/// Periodic reindex trigger: sends REINDEX to the worker, then sleeps for the
/// configured interval (falling back to 1800s if settings can't be read).
/// Returns when the command channel is closed.
pub fn self_trigger<T>(db: &T, command_buffer: Arc<Mutex<Sender<Command>>>)
	where T: ConnectionSource
{
	loop {
		{
			let command_buffer = command_buffer.lock().unwrap();
			let command_buffer = command_buffer.deref();
			if let Err(e) = command_buffer.send(Command::REINDEX) {
				println!("Error while writing to index command buffer: {}", e);
				return;
			}
		}

		// Re-read the sleep duration each cycle so settings changes apply
		// without a restart.
		let sleep_duration;
		{
			let connection = db.get_connection();
			let settings: Result<MiscSettings> = misc_settings::table
				.get_result(connection.deref())
				.map_err(|e| e.into());
			if let Err(ref e) = settings {
				println!("Could not retrieve index sleep duration: {}", e);
			}
			sleep_duration = settings
				.map(|s| s.index_sleep_duration_seconds)
				.unwrap_or(1800);
		}
		thread::sleep(time::Duration::from_secs(sleep_duration as u64));
	}
}
/// Rewrites a song's real filesystem paths into their virtual (mount-relative)
/// form. Returns None when the song itself is outside every mount point;
/// untranslatable artwork is dropped rather than failing the whole song.
pub fn virtualize_song(vfs: &VFS, mut song: Song) -> Option<Song> {
	let virtual_path = match vfs.real_to_virtual(Path::new(&song.path)) {
		Ok(p) => p.to_string_lossy().into_owned(),
		_ => return None,
	};
	song.path = virtual_path;

	if let Some(real_artwork) = song.artwork.take() {
		song.artwork = vfs.real_to_virtual(Path::new(&real_artwork))
			.ok()
			.map(|p| p.to_string_lossy().into_owned());
	}

	Some(song)
}
/// Rewrites a directory's real filesystem paths into their virtual
/// (mount-relative) form. Returns None when the directory is outside every
/// mount point; untranslatable artwork is dropped.
fn virtualize_directory(vfs: &VFS, mut directory: Directory) -> Option<Directory> {
	let virtual_path = match vfs.real_to_virtual(Path::new(&directory.path)) {
		Ok(p) => p.to_string_lossy().into_owned(),
		_ => return None,
	};
	directory.path = virtual_path;

	if let Some(real_artwork) = directory.artwork.take() {
		directory.artwork = vfs.real_to_virtual(Path::new(&real_artwork))
			.ok()
			.map(|p| p.to_string_lossy().into_owned());
	}

	Some(directory)
}
/// Lists the contents of a virtual path: mount points at the root, otherwise
/// the sub-directories followed by the songs of the corresponding real
/// directory (both case-insensitively sorted by path). Entries that can't be
/// mapped back to a virtual path are filtered out.
pub fn browse<T>(db: &T, virtual_path: &Path) -> Result<Vec<CollectionFile>>
	where T: ConnectionSource + VFSSource
{
	let mut output = Vec::new();
	let vfs = db.get_vfs()?;
	let connection = db.get_connection();

	if virtual_path.components().count() == 0 {
		// Browse top-level
		let real_directories: Vec<Directory> = directories::table
			.filter(directories::parent.is_null())
			.load(connection.deref())?;
		let virtual_directories = real_directories
			.into_iter()
			.filter_map(|s| virtualize_directory(&vfs, s));
		output.extend(virtual_directories
		                  .into_iter()
		                  .map(|d| CollectionFile::Directory(d)));

	} else {
		// Browse sub-directory
		let real_path = vfs.virtual_to_real(virtual_path)?;
		let real_path_string = real_path.as_path().to_string_lossy().into_owned();

		let real_directories: Vec<Directory> = directories::table
			.filter(directories::parent.eq(&real_path_string))
			.order(sql::<types::Bool>("path COLLATE NOCASE ASC"))
			.load(connection.deref())?;
		let virtual_directories = real_directories
			.into_iter()
			.filter_map(|s| virtualize_directory(&vfs, s));
		output.extend(virtual_directories.map(|d| CollectionFile::Directory(d)));

		let real_songs: Vec<Song> = songs::table
			.filter(songs::parent.eq(&real_path_string))
			.order(sql::<types::Bool>("path COLLATE NOCASE ASC"))
			.load(connection.deref())?;
		let virtual_songs = real_songs
			.into_iter()
			.filter_map(|s| virtualize_song(&vfs, s));
		output.extend(virtual_songs.map(|s| CollectionFile::Song(s)));
	}

	Ok(output)
}
/// Returns every song at or below `virtual_path`, ordered by path.
/// An empty/root virtual path returns the entire collection; otherwise a SQL
/// LIKE prefix match selects the subtree.
pub fn flatten<T>(db: &T, virtual_path: &Path) -> Result<Vec<Song>>
	where T: ConnectionSource + VFSSource
{
	use self::songs::dsl::*;
	let vfs = db.get_vfs()?;
	let connection = db.get_connection();

	let real_songs: Vec<Song> = if virtual_path.parent() != None {
		let real_path = vfs.virtual_to_real(virtual_path)?;
		let like_path = real_path.as_path().to_string_lossy().into_owned() + "%";
		songs
			.filter(path.like(&like_path))
			.order(path)
			.load(connection.deref())?
	} else {
		songs.order(path).load(connection.deref())?
	};

	let virtual_songs = real_songs
		.into_iter()
		.filter_map(|s| virtualize_song(&vfs, s));
	Ok(virtual_songs.collect::<Vec<_>>())
}
/// Picks up to `count` directories that look like albums (non-null album tag)
/// in random order, using SQLite's RANDOM() for the shuffle.
pub fn get_random_albums<T>(db: &T, count: i64) -> Result<Vec<Directory>>
	where T: ConnectionSource + VFSSource
{
	use self::directories::dsl::*;
	let vfs = db.get_vfs()?;
	let connection = db.get_connection();
	let real_directories = directories
		.filter(album.is_not_null())
		.limit(count)
		.order(random)
		.load(connection.deref())?;
	let virtual_directories = real_directories
		.into_iter()
		.filter_map(|s| virtualize_directory(&vfs, s));
	Ok(virtual_directories.collect::<Vec<_>>())
}
/// Returns up to `count` album directories, most recently indexed first
/// (ordered by date_added descending).
pub fn get_recent_albums<T>(db: &T, count: i64) -> Result<Vec<Directory>>
	where T: ConnectionSource + VFSSource
{
	use self::directories::dsl::*;
	let vfs = db.get_vfs()?;
	let connection = db.get_connection();
	let real_directories: Vec<Directory> = directories
		.filter(album.is_not_null())
		.order(date_added.desc())
		.limit(count)
		.load(connection.deref())?;
	let virtual_directories = real_directories
		.into_iter()
		.filter_map(|s| virtualize_directory(&vfs, s));
	Ok(virtual_directories.collect::<Vec<_>>())
}
#[test]
fn test_populate() {
// Indexes the bundled test collection and checks the resulting row counts.
let db = db::_get_test_db("populate.sqlite");
update(&db).unwrap();
update(&db).unwrap(); // Check that subsequent updates don't run into conflicts
let connection = db.get_connection();
// Expected counts match the fixture collection on disk.
let all_directories: Vec<Directory> = directories::table.load(connection.deref()).unwrap();
let all_songs: Vec<Song> = songs::table.load(connection.deref()).unwrap();
assert_eq!(all_directories.len(), 5);
assert_eq!(all_songs.len(), 12);
}
#[test]
fn test_metadata() {
// Verifies tag extraction (including a non-ASCII title) for one fixture song.
let mut target = PathBuf::new();
target.push("test");
target.push("collection");
target.push("Tobokegao");
target.push("Picnic");
let mut song_path = target.clone();
song_path.push("05 - シャーベット (Sherbet).mp3");
let mut artwork_path = target.clone();
artwork_path.push("Folder.png");
let db = db::_get_test_db("metadata.sqlite");
update(&db).unwrap();
let connection = db.get_connection();
// Look the song up by its exact title tag.
let songs: Vec<Song> = songs::table
.filter(songs::title.eq("シャーベット (Sherbet)"))
.load(connection.deref())
.unwrap();
assert_eq!(songs.len(), 1);
let song = &songs[0];
assert_eq!(song.path, song_path.to_string_lossy().as_ref());
assert_eq!(song.track_number, Some(5));
assert_eq!(song.disc_number, None);
assert_eq!(song.title, Some("シャーベット (Sherbet)".to_owned()));
assert_eq!(song.artist, Some("Tobokegao".to_owned()));
assert_eq!(song.album_artist, None);
assert_eq!(song.album, Some("Picnic".to_owned()));
assert_eq!(song.year, Some(2016));
// Artwork is expected to resolve to the sibling Folder.png file.
assert_eq!(song.artwork,
Some(artwork_path.to_string_lossy().into_owned()));
}
#[test]
fn test_browse_top_level() {
    // Browsing the empty (root) virtual path should list exactly the mount point.
    let root_path = PathBuf::from("root");
    let db = db::_get_test_db("browse_top_level.sqlite");
    update(&db).unwrap();
    let results = browse(&db, Path::new("")).unwrap();
    assert_eq!(results.len(), 1);
    if let CollectionFile::Directory(ref d) = results[0] {
        assert_eq!(d.path, root_path.to_str().unwrap());
    } else {
        panic!("Expected directory");
    }
}
#[test]
fn test_browse() {
// Browsing the mount point should list its two artist directories, in order.
let mut khemmis_path = PathBuf::new();
khemmis_path.push("root");
khemmis_path.push("Khemmis");
let mut tobokegao_path = PathBuf::new();
tobokegao_path.push("root");
tobokegao_path.push("Tobokegao");
let db = db::_get_test_db("browse.sqlite");
update(&db).unwrap();
let results = browse(&db, Path::new("root")).unwrap();
assert_eq!(results.len(), 2);
match results[0] {
CollectionFile::Directory(ref d) => assert_eq!(d.path, khemmis_path.to_str().unwrap()),
_ => panic!("Expected directory"),
}
match results[1] {
CollectionFile::Directory(ref d) => assert_eq!(d.path, tobokegao_path.to_str().unwrap()),
_ => panic!("Expected directory"),
}
}
#[test]
fn test_flatten() {
    // Flattening the mount point should return every indexed song, sorted by path.
    let db = db::_get_test_db("flatten.sqlite");
    update(&db).unwrap();
    let songs = flatten(&db, Path::new("root")).unwrap();
    assert_eq!(songs.len(), 12);
    assert_eq!(songs[0].title, Some("Above The Water".to_owned()));
}
#[test]
fn test_random() {
    // Requesting one random album should yield exactly one directory.
    let db = db::_get_test_db("random.sqlite");
    update(&db).unwrap();
    assert_eq!(get_random_albums(&db, 1).unwrap().len(), 1);
}
#[test]
fn test_recent() {
    // Recent albums must come back newest-first.
    let db = db::_get_test_db("recent.sqlite");
    update(&db).unwrap();
    let albums = get_recent_albums(&db, 2).unwrap();
    assert_eq!(albums.len(), 2);
    assert!(albums[0].date_added >= albums[1].date_added);
}

View file

@ -1,209 +1,165 @@
#![recursion_limit = "128"]
#![cfg_attr(all(windows, feature = "ui"), windows_subsystem = "windows")]
#![recursion_limit = "256"]
extern crate ape;
extern crate app_dirs;
extern crate core;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_codegen;
#[macro_use]
extern crate error_chain;
extern crate getopts;
extern crate hyper;
extern crate id3;
extern crate image;
extern crate iron;
extern crate lewton;
extern crate metaflac;
extern crate mount;
extern crate params;
extern crate rand;
extern crate reqwest;
extern crate regex;
extern crate ring;
extern crate router;
extern crate secure_session;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate staticfile;
extern crate toml;
extern crate typemap;
extern crate url;
use log::{error, info};
use options::CLIOptions;
use simplelog::{
ColorChoice, CombinedLogger, LevelFilter, SharedLogger, TermLogger, TerminalMode, WriteLogger,
};
use std::fs;
use std::path::{Path, PathBuf};
#[cfg(windows)]
extern crate uuid;
#[cfg(windows)]
extern crate winapi;
#[cfg(windows)]
extern crate kernel32;
#[cfg(windows)]
extern crate shell32;
#[cfg(windows)]
extern crate user32;
#[cfg(unix)]
extern crate unix_daemonize;
#[cfg(unix)]
use unix_daemonize::{daemonize_redirect, ChdirMode};
use core::ops::Deref;
use errors::*;
#[cfg(unix)]
use getopts::Matches;
use getopts::Options;
use iron::prelude::*;
use mount::Mount;
use staticfile::Static;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::channel;
mod api;
mod config;
mod db;
mod ddns;
mod errors;
mod index;
mod metadata;
mod playlist;
mod app;
mod options;
mod paths;
mod server;
#[cfg(test)]
mod test;
mod ui;
mod user;
mod utils;
mod serve;
mod thumbnails;
mod vfs;
fn main() {
if let Err(ref e) = run() {
println!("Error: {}", e);
for e in e.iter().skip(1) {
println!("caused by: {}", e);
}
if let Some(backtrace) = e.backtrace() {
println!("backtrace: {:?}", backtrace);
}
::std::process::exit(1);
}
/// Startup-time errors surfaced by `main`; each variant carries enough
/// context to print an actionable message.
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error(transparent)]
App(#[from] app::Error),
#[error("Could not start web services")]
ServiceStartup(std::io::Error),
#[error("Could not parse command line arguments:\n\n{0}")]
CliArgsParsing(getopts::Fail),
#[cfg(unix)]
#[error("Failed to turn polaris process into a daemon:\n\n{0}")]
Daemonize(daemonize::Error),
#[error("Could not create log directory `{0}`:\n\n{1}")]
LogDirectoryCreationError(PathBuf, std::io::Error),
#[error("Could not create log file `{0}`:\n\n{1}")]
LogFileCreationError(PathBuf, std::io::Error),
#[error("Could not initialize log system:\n\n{0}")]
LogInitialization(log::SetLoggerError),
#[cfg(unix)]
#[error("Could not create pid directory `{0}`:\n\n{1}")]
PidDirectoryCreationError(PathBuf, std::io::Error),
#[cfg(unix)]
#[error("Could not notify systemd of initialization success:\n\n{0}")]
SystemDNotify(std::io::Error),
}
#[cfg(unix)]
fn daemonize(options: &getopts::Matches) -> Result<()> {
if options.opt_present("f") {
fn daemonize<T: AsRef<Path>>(foreground: bool, pid_file_path: T) -> Result<(), Error> {
if foreground {
return Ok(());
}
let mut log_file = utils::get_data_root()?;
log_file.push("polaris.log");
match daemonize_redirect(Some(&log_file), Some(&log_file), ChdirMode::NoChdir) {
Ok(_) => Ok(()),
Err(_) => bail!(ErrorKind::DaemonError),
if let Some(parent) = pid_file_path.as_ref().parent() {
fs::create_dir_all(parent)
.map_err(|e| Error::PidDirectoryCreationError(parent.to_owned(), e))?;
}
let daemonize = daemonize::Daemonize::new()
.pid_file(pid_file_path.as_ref())
.working_directory(".");
daemonize.start().map_err(Error::Daemonize)?;
Ok(())
}
fn run() -> Result<()> {
/// Notifies systemd that startup completed; a no-op when not running under systemd.
#[cfg(unix)]
fn notify_ready() -> Result<(), Error> {
// `booted()` reports whether systemd is the init system; skip notification otherwise.
if let Ok(true) = sd_notify::booted() {
sd_notify::notify(true, &[sd_notify::NotifyState::Ready]).map_err(Error::SystemDNotify)?;
}
Ok(())
}
/// Sets up terminal logging, plus file logging when `log_file_path` is given.
/// Creates the log directory on demand; failures map to dedicated `Error` variants.
fn init_logging<T: AsRef<Path>>(
log_level: LevelFilter,
log_file_path: &Option<T>,
) -> Result<(), Error> {
let log_config = simplelog::ConfigBuilder::new()
// Only include source locations for errors, and mute the noisy symphonia crate.
.set_location_level(LevelFilter::Error)
.add_filter_ignore_str("symphonia")
.build();
// Terminal logger is always active; a file logger is appended when configured.
let mut loggers: Vec<Box<dyn SharedLogger>> = vec![TermLogger::new(
log_level,
log_config.clone(),
TerminalMode::Mixed,
ColorChoice::Auto,
)];
if let Some(path) = log_file_path {
if let Some(parent) = path.as_ref().parent() {
fs::create_dir_all(parent)
.map_err(|e| Error::LogDirectoryCreationError(parent.to_owned(), e))?;
}
loggers.push(WriteLogger::new(
log_level,
log_config,
fs::File::create(path)
.map_err(|e| Error::LogFileCreationError(path.as_ref().to_owned(), e))?,
));
}
CombinedLogger::init(loggers).map_err(Error::LogInitialization)?;
Ok(())
}
fn main() -> Result<(), Error> {
// Parse CLI options
let args: Vec<String> = std::env::args().collect();
let mut options = Options::new();
options.optopt("c", "config", "set the configuration file", "FILE");
options.optopt("p", "port", "set polaris to run on a custom port", "PORT");
options.optopt("d", "database", "set the path to index database", "FILE");
options.optopt("w", "web", "set the path to web client files", "DIRECTORY");
let options_manager = options::Manager::new();
let cli_options = options_manager
.parse(&args[1..])
.map_err(Error::CliArgsParsing)?;
#[cfg(unix)]
options.optflag("f",
"foreground",
"run polaris in the foreground instead of daemonizing");
options.optflag("h", "help", "print this help menu");
let matches = options.parse(&args[1..])?;
if matches.opt_present("h") {
if cli_options.show_help {
let program = args[0].clone();
let brief = format!("Usage: {} [options]", program);
print!("{}", options.usage(&brief));
print!("{}", options_manager.usage(&brief));
return Ok(());
}
let paths = paths::Paths::new(&cli_options);
// Logging
let log_level = cli_options.log_level.unwrap_or(LevelFilter::Info);
init_logging(log_level, &paths.log_file_path)?;
// Fork
#[cfg(unix)]
daemonize(&matches)?;
daemonize(cli_options.foreground, &paths.pid_file_path)?;
// Init DB
println!("Starting up database");
let db_path = matches.opt_str("d");
let mut default_db_path = utils::get_data_root()?;
default_db_path.push("db.sqlite");
let db_path = db_path
.map(|n| Path::new(n.as_str()).to_path_buf())
.unwrap_or(default_db_path);
let db = Arc::new(db::DB::new(&db_path)?);
info!("Cache files location is {:#?}", paths.cache_dir_path);
info!("Data files location is {:#?}", paths.data_dir_path);
info!("Config file location is {:#?}", paths.config_file_path);
info!("Legacy database file location is {:#?}", paths.db_file_path);
info!("Log file location is {:#?}", paths.log_file_path);
#[cfg(unix)]
if !cli_options.foreground {
info!("Pid file location is {:#?}", paths.pid_file_path);
}
info!("Web client files location is {:#?}", paths.web_dir_path);
// Parse config
let config_file_name = matches.opt_str("c");
let config_file_path = config_file_name.map(|p| Path::new(p.as_str()).to_path_buf());
if let Some(path) = config_file_path {
let config = config::parse_toml_file(&path)?;
config::overwrite(db.deref(), &config)?;
async_main(cli_options, paths)
}
#[tokio::main]
async fn async_main(cli_options: CLIOptions, paths: paths::Paths) -> Result<(), Error> {
// Create and run app
let app = app::App::new(cli_options.port.unwrap_or(5050), paths).await?;
app.scanner.queue_scan();
app.ddns_manager.begin_periodic_updates();
// Start server
info!("Starting up server");
if let Err(e) = server::launch(app).await {
return Err(Error::ServiceStartup(e));
}
// Init index
let (index_sender, index_receiver) = channel();
let index_sender = Arc::new(Mutex::new(index_sender));
let db_ref = db.clone();
std::thread::spawn(move || {
let db = db_ref.deref();
index::update_loop(db, index_receiver);
});
// Trigger auto-indexing
let db_ref = db.clone();
let sender_ref = index_sender.clone();
std::thread::spawn(move || { index::self_trigger(db_ref.deref(), sender_ref); });
// Mount API
println!("Mounting API");
let mut mount = Mount::new();
let handler = api::get_handler(db.clone(), index_sender)?;
mount.mount("/api/", handler);
// Mount static files
println!("Mounting static files");
let web_dir_name = matches.opt_str("w");
let mut default_web_dir = utils::get_data_root()?;
default_web_dir.push("web");
let web_dir_path = web_dir_name
.map(|n| Path::new(n.as_str()).to_path_buf())
.unwrap_or(default_web_dir);
mount.mount("/", Static::new(web_dir_path));
println!("Starting up server");
let port: u16 = matches
.opt_str("p")
.unwrap_or("5050".to_owned())
.parse()
.or(Err("invalid port number"))?;
let mut server = match Iron::new(mount).http(("0.0.0.0", port)) {
Ok(s) => s,
Err(e) => bail!("Error starting up server: {}", e),
};
// Start DDNS updates
let db_ref = db.clone();
std::thread::spawn(move || { ddns::run(db_ref.deref()); });
// Send readiness notification
#[cfg(unix)]
notify_ready()?;
// Run UI
ui::run();
println!("Shutting down server");
if let Err(e) = server.close() {
bail!("Error shutting down server: {}", e);
}
info!("Shutting down server");
Ok(())
}

View file

@ -1,170 +0,0 @@
use ape;
use id3;
use lewton::inside_ogg::OggStreamReader;
use metaflac;
use regex::Regex;
use std::fs;
use std::path::Path;
use errors::*;
use utils;
use utils::AudioFormat;
/// Tag data extracted from an audio file, normalized across formats
/// (ID3, APE, Vorbis, FLAC). Absent tags are `None`.
#[derive(Debug, PartialEq)]
pub struct SongTags {
pub disc_number: Option<u32>,
pub track_number: Option<u32>,
pub title: Option<String>,
pub artist: Option<String>,
pub album_artist: Option<String>,
pub album: Option<String>,
pub year: Option<i32>,
}
/// Reads tags from `path`, dispatching to the format-specific reader
/// based on the detected audio format. Unsupported formats are an error.
pub fn read(path: &Path) -> Result<SongTags> {
match utils::get_audio_format(path) {
Some(AudioFormat::FLAC) => read_flac(path),
Some(AudioFormat::MP3) => read_id3(path),
// Musepack files carry APE tags.
Some(AudioFormat::MPC) => read_ape(path),
Some(AudioFormat::OGG) => read_vorbis(path),
_ => bail!("Unsupported file format for reading metadata"),
}
}
/// Reads ID3 tags from an MP3 file.
///
/// The year comes from the year frame when present, falling back to the
/// release date, then the recording date.
fn read_id3(path: &Path) -> Result<SongTags> {
    let tag = id3::Tag::read_from_path(path)?;
    // `or_else` keeps the fallback lookups lazy (the original `.or(...)` form
    // evaluated both date accessors even when the year frame was present), and
    // `map` replaces the non-idiomatic `and_then(|d| Some(d.year))`.
    let year = tag
        .year()
        .map(|y| y as i32)
        .or_else(|| tag.date_released().map(|d| d.year))
        .or_else(|| tag.date_recorded().map(|d| d.year));
    Ok(SongTags {
        artist: tag.artist().map(|s| s.to_string()),
        album_artist: tag.album_artist().map(|s| s.to_string()),
        album: tag.album().map(|s| s.to_string()),
        title: tag.title().map(|s| s.to_string()),
        disc_number: tag.disc(),
        track_number: tag.track(),
        year: year,
    })
}
/// Extracts the text payload of an APE item; non-text items yield `None`.
fn read_ape_string(item: &ape::Item) -> Option<String> {
    if let ape::ItemValue::Text(ref text) = item.value {
        Some(text.clone())
    } else {
        None
    }
}
/// Parses an APE text item as a signed integer; non-text or
/// non-numeric values yield `None`.
fn read_ape_i32(item: &ape::Item) -> Option<i32> {
    if let ape::ItemValue::Text(ref text) = item.value {
        text.parse::<i32>().ok()
    } else {
        None
    }
}
/// Parses the leading integer out of an APE "X of Y"-style value
/// (e.g. "3/10" → 3). Returns `None` for non-text items or values that
/// do not start with a digit.
fn read_ape_x_of_y(item: &ape::Item) -> Option<u32> {
    match item.value {
        ape::ItemValue::Text(ref s) => {
            // Take the leading run of decimal digits directly instead of
            // compiling a `^\d+` regex on every call. `parse::<u32>` rejects
            // non-ASCII digits either way, so results are unchanged.
            let end = s.find(|c: char| !c.is_digit(10)).unwrap_or(s.len());
            s[..end].parse().ok()
        }
        _ => None,
    }
}
/// Reads APE tags (used by Musepack files) into a normalized `SongTags`.
///
/// "Disc" and "Track" values may be in "X of Y" form; only the leading
/// number is kept.
fn read_ape(path: &Path) -> Result<SongTags> {
    let tag = ape::read(path)?;
    Ok(SongTags {
        artist: tag.item("Artist").and_then(read_ape_string),
        album_artist: tag.item("Album artist").and_then(read_ape_string),
        album: tag.item("Album").and_then(read_ape_string),
        title: tag.item("Title").and_then(read_ape_string),
        disc_number: tag.item("Disc").and_then(read_ape_x_of_y),
        track_number: tag.item("Track").and_then(read_ape_x_of_y),
        year: tag.item("Year").and_then(read_ape_i32),
    })
}
/// Reads Vorbis comments from an Ogg file into a normalized `SongTags`.
fn read_vorbis(path: &Path) -> Result<SongTags> {
let file = fs::File::open(path)?;
let source = OggStreamReader::new(file)?;
// Start with everything absent and fill fields as comments are found.
let mut tags = SongTags {
artist: None,
album_artist: None,
album: None,
title: None,
disc_number: None,
track_number: None,
year: None,
};
// Comments are (key, value) pairs; when a key repeats, the last value wins.
// Unrecognized keys are ignored.
for (key, value) in source.comment_hdr.comment_list {
match key.as_str() {
"TITLE" => tags.title = Some(value),
"ALBUM" => tags.album = Some(value),
"ARTIST" => tags.artist = Some(value),
"ALBUMARTIST" => tags.album_artist = Some(value),
"TRACKNUMBER" => tags.track_number = value.parse::<u32>().ok(),
"DISCNUMBER" => tags.disc_number = value.parse::<u32>().ok(),
"DATE" => tags.year = value.parse::<i32>().ok(),
_ => (),
}
}
Ok(tags)
}
/// Reads tags from a FLAC file's Vorbis comment block.
///
/// A FLAC file without Vorbis comments is an error; individual missing
/// fields are simply `None`.
fn read_flac(path: &Path) -> Result<SongTags> {
    let tag = metaflac::Tag::read_from_path(path)?;
    let vorbis = tag.vorbis_comments().ok_or("Missing Vorbis comments")?;
    // Use `.first()` instead of indexing: a key that is present with an
    // empty value list would make `d[0]` / `v[0]` panic.
    let disc_number = vorbis
        .get("DISCNUMBER")
        .and_then(|d| d.first())
        .and_then(|d| d.parse::<u32>().ok());
    let year = vorbis
        .get("DATE")
        .and_then(|d| d.first())
        .and_then(|d| d.parse::<i32>().ok());
    Ok(SongTags {
        artist: vorbis.artist().and_then(|v| v.first().cloned()),
        album_artist: vorbis.album_artist().and_then(|v| v.first().cloned()),
        album: vorbis.album().and_then(|v| v.first().cloned()),
        title: vorbis.title().and_then(|v| v.first().cloned()),
        disc_number: disc_number,
        track_number: vorbis.track(),
        year: year,
    })
}
#[test]
fn test_read_metadata() {
// The sample files all carry the same tag values, so one expected
// struct covers every supported format.
let sample_tags = SongTags {
disc_number: Some(3),
track_number: Some(1),
title: Some("TEST TITLE".into()),
artist: Some("TEST ARTIST".into()),
album_artist: Some("TEST ALBUM ARTIST".into()),
album: Some("TEST ALBUM".into()),
year: Some(2016),
};
assert_eq!(read(Path::new("test/sample.mp3")).unwrap(), sample_tags);
assert_eq!(read(Path::new("test/sample.ogg")).unwrap(), sample_tags);
assert_eq!(read(Path::new("test/sample.flac")).unwrap(), sample_tags);
}

93
src/options.rs Normal file
View file

@ -0,0 +1,93 @@
use simplelog::LevelFilter;
use std::path::PathBuf;
/// Command line options after parsing; `None` fields fall back to
/// build-time or platform defaults (see `paths::Paths`).
pub struct CLIOptions {
pub show_help: bool,
// When true, do not daemonize (unix) / derived from the "ui" feature (windows).
pub foreground: bool,
pub log_file_path: Option<PathBuf>,
#[cfg(unix)]
pub pid_file_path: Option<PathBuf>,
pub config_file_path: Option<PathBuf>,
pub database_file_path: Option<PathBuf>,
pub cache_dir_path: Option<PathBuf>,
pub data_dir_path: Option<PathBuf>,
pub web_dir_path: Option<PathBuf>,
pub port: Option<u16>,
pub log_level: Option<LevelFilter>,
}
/// Wraps the getopts option definitions so `parse` and `usage`
/// always agree on the accepted flags.
pub struct Manager {
protocol: getopts::Options,
}
impl Manager {
/// Creates a manager with the full Polaris CLI option set.
pub fn new() -> Self {
Self {
protocol: get_options(),
}
}
/// Parses raw arguments (without the program name) into `CLIOptions`.
/// Malformed `port` / `log-level` values are silently discarded (`.ok()`),
/// leaving the corresponding option as `None`.
pub fn parse(&self, input: &[String]) -> Result<CLIOptions, getopts::Fail> {
let matches = self.protocol.parse(input)?;
Ok(CLIOptions {
show_help: matches.opt_present("h"),
// On unix, foreground is an explicit flag; on windows it is implied
// unless the "ui" feature is enabled.
#[cfg(unix)]
foreground: matches.opt_present("f"),
#[cfg(windows)]
foreground: !cfg!(feature = "ui"),
log_file_path: matches.opt_str("log").map(PathBuf::from),
#[cfg(unix)]
pid_file_path: matches.opt_str("pid").map(PathBuf::from),
config_file_path: matches.opt_str("c").map(PathBuf::from),
database_file_path: matches.opt_str("d").map(PathBuf::from),
cache_dir_path: matches.opt_str("cache").map(PathBuf::from),
data_dir_path: matches.opt_str("data").map(PathBuf::from),
web_dir_path: matches.opt_str("w").map(PathBuf::from),
port: matches.opt_str("p").and_then(|p| p.parse().ok()),
log_level: matches.opt_str("log-level").and_then(|l| l.parse().ok()),
})
}
/// Renders the getopts usage text under the given brief line.
pub fn usage(&self, brief: &str) -> String {
self.protocol.usage(brief)
}
}
/// Declares every CLI flag Polaris accepts. Declaration order here is the
/// order flags appear in the `--help` output.
fn get_options() -> getopts::Options {
let mut options = getopts::Options::new();
options.optopt("c", "config", "set the configuration file", "FILE");
options.optopt("p", "port", "set polaris to run on a custom port", "PORT");
options.optopt("d", "database", "set the path to index database", "FILE");
options.optopt("w", "web", "set the path to web client files", "DIRECTORY");
options.optopt(
"",
"cache",
"set the directory to use as cache",
"DIRECTORY",
);
options.optopt(
"",
"data",
"set the directory for persistent data",
"DIRECTORY",
);
options.optopt("", "log", "set the path to the log file", "FILE");
options.optopt("", "pid", "set the path to the pid file", "FILE");
options.optopt(
"",
"log-level",
"set the log level to a value between 0 (off) and 3 (debug)",
"LEVEL",
);
// Daemonization only exists on unix, so the flag is unix-only too.
#[cfg(unix)]
options.optflag(
"f",
"foreground",
"run polaris in the foreground instead of daemonizing",
);
options.optflag("h", "help", "print this help menu");
options
}

113
src/paths.rs Normal file
View file

@ -0,0 +1,113 @@
use std::path::PathBuf;
use crate::options::CLIOptions;
/// Fully-resolved filesystem locations used by the application.
/// Resolution order: platform defaults < build-time env overrides < CLI options.
pub struct Paths {
pub cache_dir_path: PathBuf,
pub config_file_path: PathBuf,
pub data_dir_path: PathBuf,
// Referred to as "legacy" by main's startup logging.
pub db_file_path: PathBuf,
// `None` disables file logging entirely.
pub log_file_path: Option<PathBuf>,
#[cfg(unix)]
pub pid_file_path: PathBuf,
pub web_dir_path: PathBuf,
}
// TODO Make this the only implementation when we can expand %LOCALAPPDATA% correctly on Windows
// And fix the installer accordingly (`release_script.ps1`)
#[cfg(not(windows))]
impl Default for Paths {
fn default() -> Self {
Self {
cache_dir_path: ["."].iter().collect(),
config_file_path: [".", "polaris.toml"].iter().collect(),
data_dir_path: ["."].iter().collect(),
db_file_path: [".", "db.sqlite"].iter().collect(),
log_file_path: Some([".", "polaris.log"].iter().collect()),
pid_file_path: [".", "polaris.pid"].iter().collect(),
web_dir_path: [".", "web"].iter().collect(),
}
}
}
#[cfg(windows)]
impl Default for Paths {
    /// On Windows everything lives under `%LOCALAPPDATA%\Permafrost\Polaris`.
    fn default() -> Self {
        // `expect` names the failing precondition instead of an anonymous
        // unwrap panic; LOCALAPPDATA is required on Windows builds.
        let local_app_data = std::env::var("LOCALAPPDATA")
            .map(PathBuf::from)
            .expect("LOCALAPPDATA environment variable is not set");
        let install_directory: PathBuf =
            local_app_data.join(["Permafrost", "Polaris"].iter().collect::<PathBuf>());
        Self {
            cache_dir_path: install_directory.clone(),
            config_file_path: install_directory.join("polaris.toml"),
            data_dir_path: install_directory.clone(),
            db_file_path: install_directory.join("db.sqlite"),
            log_file_path: Some(install_directory.join("polaris.log")),
            web_dir_path: install_directory.join("web"),
        }
    }
}
impl Paths {
/// Applies compile-time `POLARIS_*_DIR` overrides on top of the platform
/// defaults. Directory-style overrides get the well-known file name appended.
fn from_build() -> Self {
let defaults = Self::default();
Self {
db_file_path: option_env!("POLARIS_DB_DIR")
.map(PathBuf::from)
.map(|p| p.join("db.sqlite"))
.unwrap_or(defaults.db_file_path),
cache_dir_path: option_env!("POLARIS_CACHE_DIR")
.map(PathBuf::from)
.unwrap_or(defaults.cache_dir_path),
config_file_path: option_env!("POLARIS_CONFIG_DIR")
.map(|p| [p, "polaris.toml"].iter().collect())
.unwrap_or(defaults.config_file_path),
data_dir_path: option_env!("POLARIS_DATA_DIR")
.map(PathBuf::from)
.unwrap_or(defaults.data_dir_path),
log_file_path: option_env!("POLARIS_LOG_DIR")
.map(PathBuf::from)
.map(|p| p.join("polaris.log"))
.or(defaults.log_file_path),
#[cfg(unix)]
pid_file_path: option_env!("POLARIS_PID_DIR")
.map(PathBuf::from)
.map(|p| p.join("polaris.pid"))
.unwrap_or(defaults.pid_file_path),
web_dir_path: option_env!("POLARIS_WEB_DIR")
.map(PathBuf::from)
.unwrap_or(defaults.web_dir_path),
}
}
/// Final resolution: build-time paths, overridden field-by-field by any
/// CLI options that were provided.
pub fn new(cli_options: &CLIOptions) -> Self {
let mut paths = Self::from_build();
if let Some(path) = &cli_options.cache_dir_path {
path.clone_into(&mut paths.cache_dir_path);
}
if let Some(path) = &cli_options.config_file_path {
path.clone_into(&mut paths.config_file_path);
}
if let Some(path) = &cli_options.data_dir_path {
path.clone_into(&mut paths.data_dir_path);
}
if let Some(path) = &cli_options.database_file_path {
path.clone_into(&mut paths.db_file_path);
}
#[cfg(unix)]
if let Some(path) = &cli_options.pid_file_path {
path.clone_into(&mut paths.pid_file_path);
}
if let Some(path) = &cli_options.web_dir_path {
path.clone_into(&mut paths.web_dir_path);
}
// File logging stays on for daemons (or when a log path was requested);
// a foreground run without an explicit --log disables it.
let log_to_file = cli_options.log_file_path.is_some() || !cli_options.foreground;
if log_to_file {
paths.log_file_path = cli_options.log_file_path.clone().or(paths.log_file_path);
} else {
paths.log_file_path = None;
};
paths
}
}

View file

@ -1,298 +0,0 @@
use core::clone::Clone;
use core::ops::Deref;
use diesel;
use diesel::expression::sql;
use diesel::prelude::*;
use diesel::BelongingToDsl;
use diesel::types::*;
use std::path::Path;
#[cfg(test)]
use db;
use db::ConnectionSource;
use db::{playlists, playlist_songs, songs, users};
use index::{self, Song};
use vfs::VFSSource;
use errors::*;
/// Insertable row for creating a playlist owned by a user.
#[derive(Insertable)]
#[table_name="playlists"]
struct NewPlaylist {
name: String,
// Foreign key to users.id.
owner: i32,
}
/// Minimal user projection (id only) used to anchor playlist ownership queries.
#[derive(Identifiable, Queryable)]
pub struct User {
id: i32,
}
/// Playlist row projection; associated to its owning `User` via `owner`.
#[derive(Identifiable, Queryable, Associations)]
#[belongs_to(User, foreign_key="owner")]
pub struct Playlist {
id: i32,
owner: i32,
}
/// Playlist entry row projection; associated to its `Playlist` via `playlist`.
#[derive(Identifiable, Queryable, Associations)]
#[belongs_to(Playlist, foreign_key="playlist")]
pub struct PlaylistSong {
id: i32,
playlist: i32,
}
/// Insertable playlist entry: a real song path plus its position in the playlist.
#[derive(Insertable)]
#[table_name="playlist_songs"]
pub struct NewPlaylistSong {
playlist: i32,
path: String,
// Zero-based position within the playlist.
ordering: i32,
}
/// Lists the names of all playlists belonging to `owner`.
/// Errors if the owner does not exist.
pub fn list_playlists<T>(owner: &str, db: &T) -> Result<Vec<String>>
where T: ConnectionSource + VFSSource
{
let connection = db.get_connection();
let user: User;
{
// Resolve the owner's user id; a missing user surfaces as a query error.
use self::users::dsl::*;
user = users
.filter(name.eq(owner))
.select((id,))
.first(connection.deref())?;
}
{
use self::playlists::dsl::*;
let found_playlists: Vec<String> = Playlist::belonging_to(&user)
.select(name)
.load(connection.deref())?;
Ok(found_playlists)
}
}
/// Creates or replaces the playlist `playlist_name` for `owner` with the
/// given virtual song paths (in order).
///
/// Songs whose virtual path cannot be mapped back to a real path are
/// silently skipped. Content replacement runs inside a transaction;
/// playlist-row creation does not.
pub fn save_playlist<T>(playlist_name: &str,
owner: &str,
content: &Vec<String>,
db: &T)
-> Result<()>
where T: ConnectionSource + VFSSource
{
let user: User;
let new_playlist: NewPlaylist;
let playlist: Playlist;
let vfs = db.get_vfs()?;
{
let connection = db.get_connection();
// Find owner
{
use self::users::dsl::*;
user = users
.filter(name.eq(owner))
.select((id,))
.get_result(connection.deref())?;
}
// Create playlist
// NOTE(review): re-saving an existing playlist relies on the schema to
// de-duplicate the playlist row (test_fill_playlist saves twice) —
// confirm the table's uniqueness/conflict constraints.
new_playlist = NewPlaylist {
name: playlist_name.into(),
owner: user.id,
};
diesel::insert(&new_playlist)
.into(playlists::table)
.execute(connection.deref())?;
// Re-read the row to obtain the playlist id for the entries below.
{
use self::playlists::dsl::*;
playlist = playlists
.select((id, owner))
.filter(name.eq(playlist_name).and(owner.eq(user.id)))
.get_result(connection.deref())?;
}
}
// Map each virtual path to a real path; unmappable entries are dropped.
let mut new_songs: Vec<NewPlaylistSong> = Vec::new();
new_songs.reserve(content.len());
for (i, path) in content.iter().enumerate() {
let virtual_path = Path::new(&path);
if let Some(real_path) = vfs.virtual_to_real(virtual_path)
.ok()
.and_then(|p| p.to_str().map(|s| s.to_owned())) {
new_songs.push(NewPlaylistSong {
playlist: playlist.id,
path: real_path.into(),
ordering: i as i32,
});
}
}
{
let connection = db.get_connection();
// Replace the playlist's content atomically.
connection
.deref()
.transaction::<_, diesel::result::Error, _>(|| {
// Delete old content (if any)
let old_songs = PlaylistSong::belonging_to(&playlist);
diesel::delete(old_songs).execute(connection.deref())?;
// Insert content
diesel::insert(&new_songs)
.into(playlist_songs::table)
.execute(connection.deref())?;
Ok(())
})?;
}
Ok(())
}
/// Returns the songs of `owner`'s playlist `playlist_name`, in playlist
/// order, with paths mapped back to virtual paths.
pub fn read_playlist<T>(playlist_name: &str, owner: &str, db: &T) -> Result<Vec<Song>>
where T: ConnectionSource + VFSSource
{
let vfs = db.get_vfs()?;
let songs: Vec<Song>;
{
let connection = db.get_connection();
let user: User;
let playlist: Playlist;
// Find owner
{
use self::users::dsl::*;
user = users
.filter(name.eq(owner))
.select((id,))
.get_result(connection.deref())?;
}
// Find playlist
{
use self::playlists::dsl::*;
playlist = playlists
.select((id, owner))
.filter(name.eq(playlist_name).and(owner.eq(user.id)))
.get_result(connection.deref())?;
}
// Select songs. Not using Diesel because we need to LEFT JOIN using a custom column
let query = sql::<songs::SqlType>(r#"
SELECT s.id, s.path, s.parent, s.track_number, s.disc_number, s.title, s.artist, s.album_artist, s.year, s.album, s.artwork
FROM playlist_songs ps
LEFT JOIN songs s ON ps.path = s.path
WHERE ps.playlist = ?
ORDER BY ps.ordering
"#);
let query = query.clone().bind::<Integer, _>(playlist.id);
songs = query.get_results(connection.deref())?;
}
// Map real path to virtual paths
// Entries that no longer resolve inside the VFS are dropped.
let virtual_songs = songs
.into_iter()
.filter_map(|s| index::virtualize_song(&vfs, s))
.collect();
Ok(virtual_songs)
}
/// Deletes `owner`'s playlist `playlist_name`.
/// Errors if the owner does not exist; playlist entries are removed via the
/// playlist row's association.
pub fn delete_playlist<T>(playlist_name: &str, owner: &str, db: &T) -> Result<()>
where T: ConnectionSource + VFSSource
{
let connection = db.get_connection();
let user: User;
{
use self::users::dsl::*;
user = users
.filter(name.eq(owner))
.select((id,))
.first(connection.deref())?;
}
{
use self::playlists::dsl::*;
// Scoping the delete to the owner's playlists prevents cross-user deletion.
let q = Playlist::belonging_to(&user).filter(name.eq(playlist_name));
diesel::delete(q).execute(connection.deref())?;
}
Ok(())
}
#[test]
fn test_create_playlist() {
// Creating a playlist makes it listable for its owner only.
let db = db::_get_test_db("create_playlist.sqlite");
let found_playlists = list_playlists("test_user", &db).unwrap();
assert!(found_playlists.is_empty());
save_playlist("chill_and_grill", "test_user", &Vec::new(), &db).unwrap();
let found_playlists = list_playlists("test_user", &db).unwrap();
assert_eq!(found_playlists.len(), 1);
assert_eq!(found_playlists[0], "chill_and_grill");
// Listing for an unknown user is an error, not an empty list.
let found_playlists = list_playlists("someone_else", &db);
assert!(found_playlists.is_err());
}
#[test]
fn test_delete_playlist() {
// Deletion removes only the named playlist, and only for its owner.
let db = db::_get_test_db("delete_playlist.sqlite");
let playlist_content = Vec::new();
save_playlist("chill_and_grill", "test_user", &playlist_content, &db).unwrap();
save_playlist("mellow_bungalow", "test_user", &playlist_content, &db).unwrap();
let found_playlists = list_playlists("test_user", &db).unwrap();
assert_eq!(found_playlists.len(), 2);
delete_playlist("chill_and_grill", "test_user", &db).unwrap();
let found_playlists = list_playlists("test_user", &db).unwrap();
assert_eq!(found_playlists.len(), 1);
assert_eq!(found_playlists[0], "mellow_bungalow");
// Deleting under an unknown user errors rather than silently succeeding.
let delete_result = delete_playlist("mellow_bungalow", "someone_else", &db);
assert!(delete_result.is_err());
}
#[test]
fn test_fill_playlist() {
use index;
// Round-trips a playlist built from the whole indexed collection,
// including a duplicated entry, and verifies order and idempotent re-save.
let db = db::_get_test_db("fill_playlist.sqlite");
index::update(&db).unwrap();
let mut playlist_content: Vec<String> = index::flatten(&db, Path::new("root"))
.unwrap()
.into_iter()
.map(|s| s.path)
.collect();
assert_eq!(playlist_content.len(), 12);
// Duplicate the first song to confirm duplicates are preserved.
let first_song = playlist_content[0].clone();
playlist_content.push(first_song);
assert_eq!(playlist_content.len(), 13);
save_playlist("all_the_music", "test_user", &playlist_content, &db).unwrap();
let songs = read_playlist("all_the_music", "test_user", &db).unwrap();
assert_eq!(songs.len(), 13);
assert_eq!(songs[0].title, Some("Above The Water".to_owned()));
assert_eq!(songs[12].title, Some("Above The Water".to_owned()));
use std::path::PathBuf;
let mut first_song_path = PathBuf::new();
first_song_path.push("root");
first_song_path.push("Khemmis");
first_song_path.push("Hunted");
first_song_path.push("01 - Above The Water.mp3");
assert_eq!(songs[0].path, first_song_path.to_str().unwrap());
// Save again to verify that we don't dupe the content
save_playlist("all_the_music", "test_user", &playlist_content, &db).unwrap();
let songs = read_playlist("all_the_music", "test_user", &db).unwrap();
assert_eq!(songs.len(), 13);
}

View file

@ -1,169 +0,0 @@
use std::cmp;
use std::fs::{self, File};
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::path::Path;
use iron::headers::{AcceptRanges, ByteRangeSpec, ContentLength, ContentRange, ContentRangeSpec,
Range, RangeUnit};
use iron::modifier::Modifier;
use iron::modifiers::Header;
use iron::prelude::*;
use iron::response::WriteBody;
use iron::status::{self, Status};
use errors::{Error, ErrorKind};
/// Serves the file at `path` over HTTP, honoring an optional byte-range
/// request. Missing/forbidden files map to 404/403.
pub fn deliver(path: &Path, range_header: Option<&Range>) -> IronResult<Response> {
// Probe the file up front so filesystem errors map to proper HTTP statuses;
// the metadata value itself is unused.
match fs::metadata(path) {
Ok(meta) => meta,
Err(e) => {
let status = match e.kind() {
io::ErrorKind::NotFound => status::NotFound,
io::ErrorKind::PermissionDenied => status::Forbidden,
_ => status::InternalServerError,
};
return Err(IronError::new(e, status));
}
};
// Advertise byte-range support on every response.
let accept_range_header = Header(AcceptRanges(vec![RangeUnit::Bytes]));
let range_header = range_header.map(|h| h.clone());
match range_header {
// No range requested: serve the whole file.
None => Ok(Response::with((status::Ok, path, accept_range_header))),
Some(range) => {
match range {
Range::Bytes(vec_range) => {
// status::Ok here is provisional: the PartialFile response
// modifier rewrites it to 206 (or 416) when it runs.
if let Ok(partial_file) = PartialFile::from_path(path, vec_range) {
Ok(Response::with((status::Ok, partial_file, accept_range_header)))
} else {
Err(Error::from(ErrorKind::FileNotFound).into())
}
}
// Non-byte range units are not supported.
_ => Ok(Response::with(status::RangeNotSatisfiable)),
}
}
}
}
/// Byte range to serve from a file, mirroring HTTP byte-range forms:
/// from an offset to EOF, an inclusive span, or the last N bytes.
pub enum PartialFileRange {
AllFrom(u64),
FromTo(u64, u64),
Last(u64),
}
/// One-to-one mapping from iron's header range form to our internal range.
impl From<ByteRangeSpec> for PartialFileRange {
fn from(b: ByteRangeSpec) -> PartialFileRange {
match b {
ByteRangeSpec::AllFrom(from) => PartialFileRange::AllFrom(from),
ByteRangeSpec::FromTo(from, to) => PartialFileRange::FromTo(from, to),
ByteRangeSpec::Last(last) => PartialFileRange::Last(last),
}
}
}
/// An open file paired with the byte range of it to serve.
pub struct PartialFile {
file: File,
range: PartialFileRange,
}
impl From<Vec<ByteRangeSpec>> for PartialFileRange {
    /// Only the first range of a multi-range request is honored; an empty
    /// list means "the whole file".
    fn from(v: Vec<ByteRangeSpec>) -> PartialFileRange {
        v.into_iter()
            .next()
            .map_or(PartialFileRange::AllFrom(0), PartialFileRange::from)
    }
}
impl PartialFile {
pub fn new<Range>(file: File, range: Range) -> PartialFile
where Range: Into<PartialFileRange>
{
let range = range.into();
PartialFile {
file: file,
range: range,
}
}
pub fn from_path<P: AsRef<Path>, Range>(path: P, range: Range) -> Result<PartialFile, io::Error>
where Range: Into<PartialFileRange>
{
let file = File::open(path.as_ref())?;
Ok(Self::new(file, range))
}
}
// Shapes the HTTP response for a ranged file request: a satisfiable range
// becomes a 206 Partial Content with Content-Range/Content-Length headers
// and a body limited to that range; an unsatisfiable one becomes a 416.
impl Modifier<Response> for PartialFile {
    fn modify(self, res: &mut Response) {
        use self::PartialFileRange::*;
        // File length comes from fresh metadata; if that fails we treat the
        // range as unsatisfiable below.
        let metadata: Option<_> = self.file.metadata().ok();
        let file_length: Option<u64> = metadata.map(|m| m.len());
        // Resolve the requested range to concrete inclusive (start, end)
        // offsets, or None when it cannot be satisfied.
        let range: Option<(u64, u64)> = match (self.range, file_length) {
            (FromTo(from, to), Some(file_length)) => {
                // Clamp the end to the last byte; reject inverted or
                // past-the-end start offsets.
                if from <= to && from < file_length {
                    Some((from, cmp::min(to, file_length - 1)))
                } else {
                    None
                }
            }
            (AllFrom(from), Some(file_length)) => {
                if from < file_length {
                    Some((from, file_length - 1))
                } else {
                    None
                }
            }
            (Last(last), Some(file_length)) => {
                // "Last n bytes"; asking for more than the file holds
                // degrades to the whole file rather than failing.
                if last < file_length {
                    Some((file_length - last, file_length - 1))
                } else {
                    Some((0, file_length - 1))
                }
            }
            // No metadata means no known length, so nothing is satisfiable.
            (_, None) => None,
        };
        if let Some(range) = range {
            let content_range = ContentRange(ContentRangeSpec::Bytes {
                range: Some(range),
                instance_length: file_length,
            });
            // Inclusive bounds, hence the +1.
            let content_len = range.1 - range.0 + 1;
            res.headers.set(ContentLength(content_len));
            res.headers.set(content_range);
            // Body streams only the requested window of the file.
            let partial_content = PartialContentBody {
                file: self.file,
                offset: range.0,
                len: content_len,
            };
            res.status = Some(Status::PartialContent);
            res.body = Some(Box::new(partial_content));
        } else {
            // 416 responses advertise the full length (when known) via
            // `Content-Range: bytes */<len>` so clients can retry sensibly.
            if let Some(file_length) = file_length {
                res.headers
                    .set(ContentRange(ContentRangeSpec::Bytes {
                        range: None,
                        instance_length: Some(file_length),
                    }));
            };
            res.status = Some(Status::RangeNotSatisfiable);
        }
    }
}
/// Response body that streams a window of a file: `len` bytes starting at
/// byte `offset`. Used by the `Modifier` impl above for 206 responses.
struct PartialContentBody {
    pub file: File,
    pub offset: u64,
    pub len: u64,
}
impl WriteBody for PartialContentBody {
fn write_body(&mut self, res: &mut Write) -> io::Result<()> {
self.file.seek(SeekFrom::Start(self.offset))?;
let mut limiter = <File as Read>::by_ref(&mut self.file).take(self.len);
io::copy(&mut limiter, res).map(|_| ())
}
}

32
src/server.rs Normal file
View file

@ -0,0 +1,32 @@
use error::APIError;
mod doc;
mod dto;
mod error;
#[cfg(test)]
mod test;
/// Major revisions of the HTTP API this server can speak.
///
/// Standard derives added: the enum is a plain fieldless discriminant, so
/// `Copy`/`Eq` are free and make version checks and logging ergonomic.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum APIMajorVersion {
    V7,
    V8,
}
impl TryFrom<i32> for APIMajorVersion {
type Error = APIError;
fn try_from(value: i32) -> Result<Self, Self::Error> {
match value {
7 => Ok(Self::V7),
8 => Ok(Self::V8),
_ => Err(APIError::UnsupportedAPIVersion),
}
}
}
/// Major version of the current API surface.
pub const API_MAJOR_VERSION: i32 = 8;
/// Minor version of the current API surface.
pub const API_MINOR_VERSION: i32 = 0;
/// Separator used to join multi-value fields in responses (form feed, U+000C).
/// The explicit `'static` lifetime is dropped: `const` references are
/// implicitly `'static` (clippy: redundant_static_lifetimes).
pub const API_ARRAY_SEPARATOR: &str = "\u{000C}";
mod axum;
pub use axum::*;

Some files were not shown because too many files have changed in this diff Show more