Compare commits

...

295 commits

Author SHA1 Message Date
Daniel Hååvi
4d48ea1844
Merge pull request #229 from safing/feature/ready-api
Ready API
2024-04-23 10:56:20 +02:00
Daniel
e35d320498 Improve ready API error message 2024-04-23 10:49:16 +02:00
Daniel
b15a4aac46 Add ready API endpoint and temporarily "backport" to ping 2024-04-23 10:46:50 +02:00
Daniel
20a72df439 Improve error messages for not ready API endpoints 2024-04-23 10:46:09 +02:00
Daniel Hååvi
c4a6f2ea67
Merge pull request #228 from safing/fix/version-metadata
Fix and improve parsing of git tag based version metadata
2024-04-17 11:46:36 +02:00
Daniel
e888e08b66 Fix and improve parsing of git tag based version metadata 2024-04-16 17:10:54 +02:00
Daniel
c6fa7a8b8d Remove debug statement 2024-04-11 15:11:07 +02:00
Daniel
a5b6129e6f Improve version metadata 2024-04-11 14:59:01 +02:00
Daniel Hååvi
ae1468fea1
Merge pull request #227 from safing/maint/improve-version-info
Improve version info
2024-04-10 14:01:44 +02:00
Daniel
e7611f0469 Fix test bin 2024-04-10 13:55:43 +02:00
Daniel
16d99c76e5 Allow referrers on same origin 2024-04-10 13:48:01 +02:00
Daniel
3248926cfb Improve version info, add build time 2024-04-10 13:47:49 +02:00
Patrick Pacher
a90357bbc2
Merge pull request #226 from safing/migrate-build-info
Migrate to runtime/debug.BuildInfo for most VCS information
2024-03-27 13:48:20 +01:00
Patrick Pacher
045eedc978 fix CheckVersion assuming license is set by the build script 2024-03-26 10:13:10 +01:00
Patrick Pacher
ff5e461b84 Migrate to runtime/debug.BuildInfo for most VCS information 2024-03-26 10:10:38 +01:00
Daniel
704e9e256c Update deps 2023-12-22 14:24:35 +01:00
Daniel
7cd682c894 Close action trigger when notification is deleted 2023-12-22 14:21:18 +01:00
Daniel Hovie
88f974fa66
Merge pull request #225 from safing/feature/pm-updates
Expose SaveConfig, Remove binmeta utils, improvements
2023-12-19 15:43:59 +01:00
Daniel
865cb5dd8f Improve migration logs 2023-12-18 17:10:21 +01:00
Daniel
75e24bea70 Remove binmeta utils, which moved to portmaster/profile/icons 2023-12-18 17:10:01 +01:00
Daniel
05348192cb Disable CodeQL workflow 2023-12-18 17:09:31 +01:00
Daniel
7631b9d28a Expose SaveConfig for writing config to disk 2023-12-18 17:09:27 +01:00
Daniel Hovie
0607924762
Merge pull request #194 from safing/feature/config-value-migration
Add value migrations to config options
2023-12-01 11:49:14 +01:00
Patrick Pacher
9a29e2e4c2 Add blob: to CSP image-src 2023-12-01 11:47:44 +01:00
Daniel
3afd5009bf Add value migrations to config options 2023-11-24 14:17:23 +01:00
Daniel Hovie
83b709526e
Merge pull request #224 from safing/fix/metrics-modules-api
Fix metrics, modules, api
2023-10-13 11:55:21 +02:00
Daniel
be48ba38c8 Fix enabling metric persistence 2023-10-13 11:46:36 +02:00
Daniel
3b22f8497d Improve stopping of modules 2023-10-13 11:46:17 +02:00
Daniel
3dbffd9c1a Improve handling of service worker errors 2023-10-12 17:18:02 +02:00
Daniel
ec1616c1f5 Improve api logging when handler/endpoint does not exist 2023-10-12 17:11:31 +02:00
Daniel
7799e85d7a Report info metric as 0 the first time to track when software is (re)started 2023-10-12 17:11:00 +02:00
Daniel
05bdc44611 Fix waiting for log writers on shutdown, improve persistence enabling 2023-10-12 17:05:53 +02:00
Daniel Hovie
7872911480
Merge pull request #223 from safing/feature/mimetype-by-extension
Add MimeTypeByExtension
2023-10-11 14:39:40 +02:00
Daniel
2c0a2b26fd Add MimeTypeByExtension 2023-10-11 10:22:49 +02:00
Daniel
5150a030bf Update deps 2023-10-06 15:03:49 +02:00
Daniel Hovie
f507ff8b70
Merge pull request #222 from safing/fix/mime-type-selection
Improve mime type selection
2023-10-06 15:02:38 +02:00
Daniel
916d124231 Update go version in CI workflow 2023-10-06 12:30:40 +02:00
Daniel
47f6eb5163 Improve mime type selection 2023-10-06 10:47:39 +02:00
Daniel Hovie
b41b567d2a
Merge pull request #219 from safing/feature/config-improvements
Improve config import and export utils
2023-10-03 11:38:11 +02:00
Daniel
918841e7ea Improve call limiter test 2023-10-03 11:35:07 +02:00
Daniel
3232f2d644 Improve mime type parsing 2023-10-03 11:21:44 +02:00
Daniel
1f542005cc Fix comment 2023-10-03 11:21:44 +02:00
Daniel
a31d2c5e16 Update deps 2023-10-03 11:21:44 +02:00
Daniel
fb766d6bc9 Fix linter warning 2023-10-03 11:21:44 +02:00
Daniel
e3840f765e Add SettablePerAppAnnotation 2023-10-03 11:21:44 +02:00
Daniel
ef9e112d8b Improve mime type support for api endpoints 2023-10-03 11:21:44 +02:00
Daniel
683df179e0 Improve DSD mime type and http utils 2023-10-03 11:21:44 +02:00
Daniel
277a0ea669 Add yaml support to DSD 2023-10-03 11:21:44 +02:00
Daniel
4451b6985c Improve config import and export utils 2023-10-03 11:21:44 +02:00
Daniel Hovie
01b03aa936
Merge pull request #221 from safing/fix/version-selection
Fix version selection
2023-10-02 16:14:35 +02:00
Daniel Hovie
433ad6bf2d
Merge pull request #220 from safing/feature/call-limiter
Add call limiter
2023-10-02 16:13:52 +02:00
Daniel
85db3d9776 Fix version selection test 2023-10-02 16:01:55 +02:00
Daniel
a9dffddd7e Improve documentation 2023-10-02 16:01:45 +02:00
Daniel
7f749464dc Improve method naming and update status data 2023-10-02 13:48:15 +02:00
Daniel
dba610683d Exclude files that cannot be downloaded from version selection 2023-10-02 13:47:43 +02:00
Daniel
2ca78b1803 Add call limiter 2023-09-28 15:11:59 +02:00
Daniel Hovie
900a654a4d
Merge pull request #218 from safing/feature/key-reset-and-metrics-race-condition
Fix metrics race condition and add key reset method
2023-09-19 16:58:58 +02:00
Daniel
3d8c3de6a2 Wait for metrics pusher before persisting metrics 2023-09-19 16:55:51 +02:00
Daniel
1f08d4f02f Add method to reset key of record 2023-09-19 16:44:54 +02:00
Daniel
3dffea1d37 Update deps 2023-09-13 15:51:00 +02:00
Daniel Hovie
5e2e970ec3
Merge pull request #216 from safing/feature/apprise
Add apprise convenience lib
2023-09-13 15:31:53 +02:00
Daniel Hovie
b6c86f30dd
Merge pull request #217 from safing/feature/config-annotation-ui-reload
Add new config annotation for settings that require a UI reload
2023-09-13 15:28:28 +02:00
Patrick Pacher
65a9371fec Add new config annotation for settings that require a UI reload 2023-09-13 10:38:37 +02:00
Daniel
f7b8e4e7c3 Fix linter warning 2023-09-12 16:43:47 +02:00
Daniel
c259c5dea5 Fix comment 2023-09-12 14:02:14 +02:00
Daniel
e593d3ee45 Add apprise convenience wrapper 2023-09-12 14:02:07 +02:00
Daniel Hovie
d777cd6809
Merge pull request #215 from safing/feature/internal-metric-id
Improve Metrics and API
2023-09-05 13:14:37 +02:00
Daniel
936e42b043 Improve go profiling APIs 2023-09-05 12:51:05 +02:00
Daniel
82ed043721 Add response headers to APIRequest 2023-09-05 12:50:42 +02:00
Daniel
f2208faf8c Export metrics with values and also export values only 2023-09-05 12:50:16 +02:00
Daniel
a34de1ce8e Add internal ID to metric 2023-09-05 12:48:55 +02:00
Daniel
8d792bdacc Issue Mgmt: Disable stale PR handling 2023-08-30 12:58:32 +02:00
Daniel
f3e752f406 Add label response for fixed label 2023-08-30 11:43:22 +02:00
Daniel
5c3f9eca53 Udpate permissions 2023-08-30 11:32:34 +02:00
Daniel
624d6a4047 Switch to different label action workflow 2023-08-30 11:30:36 +02:00
Daniel Hovie
1cdc45d716
Merge pull request #212 from safing/Raphty-patch-2
Create greetings.yml
2023-08-30 10:52:41 +02:00
Daniel
8dba0a5360 Improve issue handlers 2023-08-30 10:52:09 +02:00
Raphty
5ea8354cea
Update issues-label-responder.yml 2023-08-30 08:18:48 +02:00
Daniel
4490d27b55 Add new issue management workflows 2023-08-29 18:01:23 +02:00
Raphty
d481098e66
Create greetings.yml 2023-08-28 14:30:51 +02:00
Daniel
055c220a58 Expose config change event name 2023-08-22 16:37:32 +02:00
Daniel
48711570af Document API endpoint metadata 2023-08-09 14:54:29 +02:00
Daniel
cdfdbe929c Update deps 2023-08-04 22:03:15 +02:00
Daniel
3f7fd83fbc Fix slices package usage 2023-08-04 22:03:09 +02:00
Daniel
5197807d56 Update deps 2023-07-20 14:54:40 +02:00
Daniel
3f5345e674 Add IsSetByUser to config.Option 2023-07-20 14:53:23 +02:00
Daniel
076ea970af Only parse flags if not yet parsed 2023-07-20 14:53:06 +02:00
Daniel Hovie
29ac7d1aae
Merge pull request #209 from safing/feature/database-allow-custom-interface
Add database custom interface functions
2023-07-20 14:51:24 +02:00
Daniel
ee9f722a9c Make linter happy 2023-07-20 14:48:14 +02:00
Vladimir Stoilov
df62abdf1b Add database custom interface functions 2023-07-20 14:33:55 +02:00
Daniel Hovie
e033cff403
Merge pull request #211 from safing/feature/config-annotation
Add config annotation to mark a setting as "plan requires"
2023-07-20 14:29:21 +02:00
Daniel Hovie
52ba3f0a15
Update option.go
Improve naming
2023-07-20 14:24:14 +02:00
Patrick Pacher
c992b8ea54
Add config annotation to mark a setting as "plan requires" 2023-07-19 10:59:31 +02:00
Daniel
2d0ce85661 Move auto-comment config 2023-05-16 13:22:04 +02:00
Daniel
ed58a16cbd Update issue managers 2023-05-16 13:09:47 +02:00
Daniel Hovie
bfb439adeb
Merge pull request #206 from safing/feature/module-sleep-mode
Add support for module sleep mode
2023-04-20 15:11:36 +02:00
Vladimir Stoilov
d14791df9f Improve task scheduler sleep mode 2023-04-20 15:03:15 +02:00
Vladimir Stoilov
ad52a8dc1b Fix linter error 2023-04-20 10:18:51 +02:00
Vladimir Stoilov
9367eb1365 Fix android versioning in debug info 2023-04-19 17:39:18 +02:00
Vladimir Stoilov
0ed865f4e4 Add support for module sleep mode 2023-04-19 17:38:56 +02:00
Daniel Hovie
98574e44c4
Merge pull request #205 from safing/fix/golang-ci-linter-ci
Fix golang-ci linter in CI
2023-04-13 16:35:14 +02:00
Daniel
0260986a3d Fix golang-ci linter in CI 2023-04-13 16:31:08 +02:00
Daniel Hovie
ddf9b00d40
Merge pull request #204 from safing/fix/enable-server-flag
Fix enable server flag
2023-04-13 15:47:20 +02:00
Vladimir Stoilov
d6337281e3 Fix enable server flag 2023-04-12 17:17:49 +02:00
Daniel
b174b27ccd Update deps 2023-04-06 14:53:14 +02:00
Daniel
fe11bff6d5 Merge branch 'develop' of github.com:Safing/portbase into develop 2023-04-06 14:49:56 +02:00
Daniel
c067126a0e Update deps 2023-04-06 14:49:24 +02:00
Daniel
124885b807 Move metrics test setup to testdata dir 2023-04-06 14:48:09 +02:00
Daniel Hovie
fcd91a8111
Merge pull request #203 from safing/feature/improve-update-system
Add and expose updater registry state
2023-04-06 14:46:23 +02:00
Daniel
ca8c784c23 Add switch to enable/disable API HTTP server 2023-04-06 14:35:45 +02:00
Daniel
1ae8c0698e Add TODO to improve module worker context 2023-04-06 14:34:46 +02:00
Daniel
6a7dea876b Improve updater logging 2023-04-06 14:34:27 +02:00
Vladimir Stoilov
cb63b07700 Update android debug platform info 2023-03-16 11:03:47 +01:00
Vladimir Stoilov
fca6951ecc Merge branch 'develop' into feature/improve-update-system 2023-03-14 12:28:23 +01:00
Vladimir Stoilov
7cdb71c461 Fix typos 2023-03-14 12:27:57 +01:00
Vladimir Stoilov
c9d77682f8 Fix report downloads deleting pending updates 2023-03-10 16:42:04 +01:00
Daniel
d6687ecbad Add and expose updater registry state 2023-03-09 12:05:59 +01:00
Vladimir Stoilov
efd40ea3cc
Merge pull request #199 from safing/feature/android-support
Add android specific debug info
2023-01-17 13:44:25 +01:00
vladimir
5a9e76403d Add android specific debug info 2023-01-17 13:52:52 +02:00
Daniel Hovie
0d13bca496
Merge pull request #197 from safing/fix/error-handling-api-modules
Improve error handling in api and modules
2022-12-12 15:01:16 +01:00
Daniel
d90d14ce02 Improve error handling in api and modules 2022-12-12 14:07:30 +01:00
Daniel Hovie
72288a45d7
Merge pull request #195 from safing/feature/modules-microtask-improvements
Microtasks Improvements and Module Status Export
2022-11-10 15:45:42 +01:00
Daniel
f6f644fd8e Improve logging control flow 2022-11-04 16:12:42 +01:00
Daniel
70b58138b9 Set default microtask threshold in init for easier override 2022-11-04 16:11:38 +01:00
Daniel
d21c8e6cda Add module status export func and api endpoint 2022-11-04 16:10:57 +01:00
Daniel
b0e5bc90c2 Fix api endpoint log message 2022-11-04 16:10:25 +01:00
Daniel Hovie
985a174aff
Merge pull request #189 from safing/fix/fs-error-handling
Fix fs error handling
2022-10-11 12:45:29 +02:00
Daniel
0d3a0ebb95 Fix fs error handling 2022-10-11 12:27:30 +02:00
Patrick Pacher
2b4c15c1f7
Fix error handling in LoadIndexes 2022-10-11 11:51:17 +02:00
Daniel Hovie
8471f4f38a
Merge pull request #188 from safing/maintain/sig-delete--binmeta--svchost
Improve: delete sigs, binary metadata, svchost service detection
2022-10-10 22:12:59 +02:00
Daniel
40015b54b7 Add name generation test case 2022-10-10 16:25:01 +02:00
Daniel
a391eb3dad Improve svchost service discovery 2022-10-10 16:24:51 +02:00
Daniel
b564e77168 Delete signature file when deleting resource 2022-10-10 16:24:29 +02:00
Daniel
37b91788c2 Update deps 2022-09-29 14:22:02 +02:00
Daniel Hovie
797b3691cd
Merge pull request #180 from safing/feature/update-sigs
Add support for signed updates
2022-09-29 14:13:56 +02:00
Daniel
c30e62c8e2 Update jess dep 2022-09-29 10:55:08 +02:00
Daniel
412b4242c2 FIx linter errors 2022-09-29 10:55:01 +02:00
Daniel
3c697abd5b Fix linter errors 2022-09-28 22:37:54 +02:00
Daniel
5accaad794 Require download policy to be stricter 2022-09-28 14:41:27 +02:00
Daniel
109f51e834 Verify signatures of indexes when loading from disk 2022-09-28 14:41:11 +02:00
Daniel
cded2438f6 Download missing sigs 2022-09-28 14:40:48 +02:00
Daniel
44dc8df5d6 Save verification options to resource and save if versions have a sig available 2022-09-28 14:39:41 +02:00
Daniel
77a6ab050b Add path existence check to utils 2022-09-28 14:38:14 +02:00
Daniel
beaa7482d0 Add support for signed indexes 2022-09-23 14:57:42 +02:00
Daniel
f6fc67ad46 Add support for new index format 2022-09-23 14:57:42 +02:00
Daniel
0e5eb4b6de Add support for signed updates 2022-09-23 14:57:42 +02:00
Daniel Hovie
85a84c1210
Merge pull request #187 from safing/maintain/small-fixes
Small fixes and improvements
2022-09-22 16:37:36 +02:00
Daniel
9e8d1fdd4d Improve exit code handling in run package 2022-09-22 14:36:46 +02:00
Daniel
ddf230b33e Add option to disable module management 2022-09-22 14:36:32 +02:00
Daniel
52a2a1f673 Improve profiling API endpoint docs 2022-09-22 14:36:22 +02:00
Daniel
eda7a122db Fix linter errors 2022-09-22 14:35:58 +02:00
Daniel
3920412b4b Update golang-ci and go workflow 2022-09-22 14:35:48 +02:00
Daniel
ff88d9e486
Merge pull request #178 from safing/feature/remove-metrics-instance-name-on-export
Remove metrics instance name on export
2022-08-27 13:16:03 +02:00
Daniel
43dee31466
Merge pull request #175 from safing/dependabot/go_modules/github.com/tidwall/sjson-1.2.5
Bump github.com/tidwall/sjson from 1.2.4 to 1.2.5
2022-08-24 15:01:36 +02:00
dependabot[bot]
8b5f1385b6
Bump github.com/tidwall/sjson from 1.2.4 to 1.2.5
Bumps [github.com/tidwall/sjson](https://github.com/tidwall/sjson) from 1.2.4 to 1.2.5.
- [Release notes](https://github.com/tidwall/sjson/releases)
- [Commits](https://github.com/tidwall/sjson/compare/v1.2.4...v1.2.5)

---
updated-dependencies:
- dependency-name: github.com/tidwall/sjson
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-24 11:44:22 +00:00
Daniel
6a9c18335b
Merge pull request #183 from safing/dependabot/go_modules/github.com/tidwall/gjson-1.14.3
Bump github.com/tidwall/gjson from 1.14.1 to 1.14.3
2022-08-24 13:43:41 +02:00
Daniel
0acb09a3c4
Merge pull request #184 from safing/dependabot/go_modules/github.com/VictoriaMetrics/metrics-1.22.2
Bump github.com/VictoriaMetrics/metrics from 1.20.1 to 1.22.2
2022-08-24 13:43:21 +02:00
Daniel
f5420b71eb
Merge pull request #181 from safing/fix/canceled-tasks
Stop logging an error for canceled tasks and workers
2022-08-24 13:39:52 +02:00
dependabot[bot]
23459d6b5a
Bump github.com/VictoriaMetrics/metrics from 1.20.1 to 1.22.2
Bumps [github.com/VictoriaMetrics/metrics](https://github.com/VictoriaMetrics/metrics) from 1.20.1 to 1.22.2.
- [Release notes](https://github.com/VictoriaMetrics/metrics/releases)
- [Commits](https://github.com/VictoriaMetrics/metrics/compare/v1.20.1...v1.22.2)

---
updated-dependencies:
- dependency-name: github.com/VictoriaMetrics/metrics
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-19 10:18:01 +00:00
dependabot[bot]
7a3116d7a0
Bump github.com/tidwall/gjson from 1.14.1 to 1.14.3
Bumps [github.com/tidwall/gjson](https://github.com/tidwall/gjson) from 1.14.1 to 1.14.3.
- [Release notes](https://github.com/tidwall/gjson/releases)
- [Commits](https://github.com/tidwall/gjson/compare/v1.14.1...v1.14.3)

---
updated-dependencies:
- dependency-name: github.com/tidwall/gjson
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-17 10:23:42 +00:00
Daniel
47f071ac9b Stop logging an error for canceled tasks and workers 2022-08-12 13:27:33 +02:00
Daniel
e393dac9c3 Remove metrics instance name on export 2022-08-05 14:53:13 +02:00
Daniel
866cd484c4
Merge pull request #171 from safing/dependabot/go_modules/github.com/VictoriaMetrics/metrics-1.20.1
Bump github.com/VictoriaMetrics/metrics from 1.18.1 to 1.20.1
2022-08-02 09:33:16 +02:00
Daniel
2c0c580c2f
Merge pull request #174 from safing/feature/queued-signaled-microtasks
Improve microtasks with queues and signaling
2022-08-02 09:32:19 +02:00
Daniel
1444756302 Improve function naming 2022-08-02 09:26:37 +02:00
Daniel
01c4fc75da Use new microtask function in api module 2022-08-01 11:37:53 +02:00
Daniel
aabd4fef77 Fix microtask signaling and improve tests 2022-08-01 11:14:28 +02:00
Daniel
fed9346e46 Update golangci-lint config 2022-08-01 11:14:28 +02:00
Daniel
5db95ac362 Expose container.Gather 2022-08-01 11:14:28 +02:00
Daniel
456a235b8b Remove deprecated code 2022-08-01 11:14:28 +02:00
Daniel
7fec4f5428 Add microtask queues and signaling 2022-08-01 11:14:28 +02:00
Daniel
ce02b26ff5
Merge pull request #172 from safing/fix/cors-preflight
Fix CORS preflight checks requiring authentication
2022-08-01 11:08:57 +02:00
Daniel
5bf056e584 Elaborate on open questions regarding CORS of browser extensions 2022-08-01 11:04:12 +02:00
Daniel
119dbaef97 Move security headers to the start of the router 2022-08-01 11:03:37 +02:00
Patrick Pacher
2431914756 Fix CORS preflight checks requiring authentication 2022-08-01 10:59:34 +02:00
Daniel
e5b8dd77da
Merge pull request #168 from safing/feature/token-expiration
Add support for API token expiration
2022-08-01 10:57:59 +02:00
Daniel
edbe072412
Merge pull request #173 from safing/feature/minor-improvements
Add file-picker display hint and options for stopping task scheduling
2022-07-29 15:43:30 +02:00
Daniel
b001302cb2 Add options to disable task repeating and schedule 2022-07-29 13:59:57 +02:00
Daniel
f08f16a5f3 Add file-picker display hint 2022-07-29 13:59:25 +02:00
Patrick Pacher
d30c4f4072
Automatically remove expired API keys from the setting 2022-07-27 15:27:42 +02:00
Patrick Pacher
4c6b834ae5
Add support for API token expiration 2022-07-27 14:59:52 +02:00
dependabot[bot]
7ae64ed4f5
Bump github.com/VictoriaMetrics/metrics from 1.18.1 to 1.20.1
Bumps [github.com/VictoriaMetrics/metrics](https://github.com/VictoriaMetrics/metrics) from 1.18.1 to 1.20.1.
- [Release notes](https://github.com/VictoriaMetrics/metrics/releases)
- [Commits](https://github.com/VictoriaMetrics/metrics/compare/v1.18.1...v1.20.1)

---
updated-dependencies:
- dependency-name: github.com/VictoriaMetrics/metrics
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-07-25 10:51:29 +00:00
Daniel
5007ced908
Merge pull request #167 from safing/dependabot/go_modules/github.com/tidwall/gjson-1.14.1
Bump github.com/tidwall/gjson from 1.14.0 to 1.14.1
2022-06-30 14:28:27 +02:00
Daniel
5fc3211c27
Merge pull request #166 from safing/dependabot/go_modules/github.com/stretchr/testify-1.8.0
Bump github.com/stretchr/testify from 1.6.1 to 1.8.0
2022-06-30 14:28:04 +02:00
dependabot[bot]
58e48afbcf
Bump github.com/tidwall/gjson from 1.14.0 to 1.14.1
Bumps [github.com/tidwall/gjson](https://github.com/tidwall/gjson) from 1.14.0 to 1.14.1.
- [Release notes](https://github.com/tidwall/gjson/releases)
- [Commits](https://github.com/tidwall/gjson/compare/v1.14.0...v1.14.1)

---
updated-dependencies:
- dependency-name: github.com/tidwall/gjson
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-06-30 12:27:38 +00:00
dependabot[bot]
ec2b4a21ee
Bump github.com/stretchr/testify from 1.6.1 to 1.8.0
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.6.1 to 1.8.0.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.6.1...v1.8.0)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-06-30 12:27:36 +00:00
Daniel
36c0a07d10
Merge pull request #165 from safing/dependabot/go_modules/github.com/gorilla/websocket-1.5.0
Bump github.com/gorilla/websocket from 1.4.2 to 1.5.0
2022-06-30 14:27:23 +02:00
dependabot[bot]
d59c1d1429
Bump github.com/gorilla/websocket from 1.4.2 to 1.5.0
Bumps [github.com/gorilla/websocket](https://github.com/gorilla/websocket) from 1.4.2 to 1.5.0.
- [Release notes](https://github.com/gorilla/websocket/releases)
- [Commits](https://github.com/gorilla/websocket/compare/v1.4.2...v1.5.0)

---
updated-dependencies:
- dependency-name: github.com/gorilla/websocket
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-06-30 12:27:07 +00:00
Daniel
e25e567ea7
Merge pull request #164 from safing/dependabot/go_modules/github.com/hashicorp/go-version-1.6.0
Bump github.com/hashicorp/go-version from 1.4.0 to 1.6.0
2022-06-30 14:26:20 +02:00
Daniel
a735c62ae9
Create codeql-analysis.yml 2022-06-30 11:09:18 +02:00
dependabot[bot]
483a781604
Bump github.com/hashicorp/go-version from 1.4.0 to 1.6.0
Bumps [github.com/hashicorp/go-version](https://github.com/hashicorp/go-version) from 1.4.0 to 1.6.0.
- [Release notes](https://github.com/hashicorp/go-version/releases)
- [Changelog](https://github.com/hashicorp/go-version/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/go-version/compare/v1.4.0...v1.6.0)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/go-version
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-06-30 09:07:32 +00:00
Daniel
fe84b18b0d
Create dependabot.yml 2022-06-30 11:06:51 +02:00
Daniel
d32e46aca3
Merge pull request #163 from safing/feature/export-improvements
Export Improvements
2022-06-22 15:19:15 +02:00
Daniel
aef3f523ce Improve updater.Resource.Export() documentation 2022-06-22 09:44:38 +02:00
Daniel
e7d9ee9db0 Improve log message formatting 2022-06-21 16:59:50 +02:00
Daniel
efcea66226 Add better handling for panics within api endpoint handlers 2022-06-21 16:59:41 +02:00
Daniel
f3591e81c3 Fix update cycle to notification attach to module 2022-06-21 16:58:06 +02:00
Daniel
8421b8fba8 Add clean export function for updater.Resource 2022-06-21 16:57:37 +02:00
Daniel
38d3c839ef Add config.GetActiveConfigValues to export active config values 2022-06-21 16:56:57 +02:00
Daniel
1ffef463f8
Merge pull request #162 from safing/fix/ci-linting
Update Go CI workflow
2022-05-10 15:57:13 +02:00
Daniel
a8f7c33f1f Update Go CI workflow 2022-05-10 15:51:36 +02:00
Daniel
59e88b15f7
Merge pull request #161 from safing/fix/namespace-and-labels-for-internal-metrics
Add namespace and labels to go internal metrics
2022-05-09 14:18:55 +02:00
Daniel
11b71d54e9 Add namespace and labels to go internal metrics 2022-05-09 14:15:06 +02:00
Daniel
2b73adfc22
Merge pull request #160 from safing/fix/endpoint-http-handler
Restore http.Request body for http.HandlerFunc in api package
2022-05-03 16:46:08 +02:00
Patrick Pacher
ba6dee246b
Restore http.Request body for http.HandlerFunc in api package 2022-05-03 16:09:40 +02:00
Daniel
85f8832911
Merge pull request #159 from safing/fix/custom-task-scheduling
Allow tasks to schedule themselves in-task
2022-04-26 15:28:22 +02:00
Daniel
3f8f50ad1f Allow tasks to schedule themselves in-task 2022-04-26 14:35:52 +02:00
Daniel
7797a54d18
Merge pull request #158 from safing/feature/debug-apis-and-metrics-comment
Go profiling APIs and metrics comment
2022-04-13 11:30:55 +02:00
Daniel
8a16414766 Add metrics comment for some additional info 2022-04-13 11:19:07 +02:00
Daniel
dce16ad393 Add go profiling debug api endpoints 2022-04-13 11:18:40 +02:00
Daniel
ba802b25c6
Merge pull request #157 from safing/feature/debug-data-improvements
Improve debug data
2022-03-21 16:11:29 +01:00
Daniel
6ef0282dc4 Fix linter warnings 2022-03-21 15:42:41 +01:00
Daniel
a9b491cac2 Add debug info for config options 2022-03-19 22:00:57 +01:00
Daniel
deef6cdafc Mark config options as sensitive 2022-03-19 22:00:39 +01:00
Daniel
1546261fcc Improve running commands 2022-03-19 22:00:15 +01:00
Patrick Pacher
7f87e417d8
Enable parallel downloads of updates and fix EOF when unpacking archives 2022-03-02 12:03:46 +01:00
Daniel
9504c41702
Merge pull request #155 from safing/feature/config-option-validation-func
Add optional validation function to config options
2022-03-01 16:00:28 +01:00
Daniel
f739d08a40 Fix error handling when setting single config options 2022-03-01 15:49:16 +01:00
Daniel
50b87e0240 Display error notifications when config loading failed 2022-03-01 15:29:41 +01:00
Daniel
bdd1bc2d86 Add proper validation errors to config module, enable soft-fail on start 2022-03-01 15:28:55 +01:00
Daniel
7d144dae89 Start config validity flag with invalid 2022-02-25 15:57:01 +01:00
Daniel
874214ea59 Improve validation error messages 2022-02-25 15:56:40 +01:00
Daniel
92c16b1c88 Add optional validation function to config options 2022-02-17 15:35:14 +01:00
Daniel
76648d6c4e Update go workflow 2022-02-17 10:49:28 +01:00
Daniel
fe1ceba8b5 Add issue manager workflow 2022-02-15 15:00:31 +01:00
Daniel
d111f8183c Update deps 2022-02-14 16:01:47 +01:00
Daniel
cc1375e1de Fix go build flags 2022-02-14 16:01:42 +01:00
Patrick Pacher
95942bbc88
Merge pull request #154 from safing/feature/restart-pending-setting-annotation
Annotate config option if changed and restart is required
2022-02-04 09:48:40 +01:00
Daniel
e6903c65dc Annotate config option if changed and restart is required 2022-02-03 15:36:18 +01:00
Daniel
6b2e20ca56
Merge pull request #153 from safing/fix/tests-and-linters
Fix Tests and Linters
2022-02-03 15:34:20 +01:00
Daniel
6650fb3b19 Update linter settings and fix warnings 2022-02-02 14:58:27 +01:00
Daniel
22c59c50cc Disable gofmt CI test for now 2022-02-01 13:57:38 +01:00
Daniel
9222e0b328 Fix gofmt CI test 2022-02-01 13:47:57 +01:00
Daniel
3e4e0c361d Fix or disable new linters 2022-02-01 13:40:50 +01:00
Daniel
fcb5ca2473 Update golangci-lint to v1.44 2022-02-01 13:16:53 +01:00
Daniel
dba4ba3dc7 Update golangci-lint config 2022-02-01 13:16:38 +01:00
Daniel
ea57310483 Update test script 2022-02-01 13:16:02 +01:00
Daniel
f59ad0357a Fix tests and linter warnings 2022-02-01 13:12:46 +01:00
Daniel
7d2cd6c15d
Merge pull request #152 from safing/fix/csrf-protection
Fix CSRF Protection
2022-01-20 09:46:41 +01:00
Daniel
4c7d61fad5 Buffer signal channel 2022-01-20 09:02:57 +01:00
Daniel
3a77659670 Use request.Host instead of the header's Host 2022-01-20 09:02:47 +01:00
Daniel
ddc934fbe4
Merge pull request #151 from safing/feature/migration
Add new database migration system
2022-01-19 11:06:02 +01:00
Daniel
e0e47d9ba6
Merge pull request #150 from safing/feature/csrf-protection
Add CSRF protection
2022-01-19 11:05:49 +01:00
Patrick Pacher
ef45e11d09 Add new database migration system 2022-01-19 10:07:28 +01:00
Daniel
c92fcf340d Add CSRF protection 2022-01-11 08:58:07 +01:00
Daniel
31a7c99f74
Merge pull request #149 from safing/feature/host-metrics
Add host metrics
2021-12-22 11:24:45 +01:00
Daniel
75d108001b Improve metric instance detection and pushing 2021-12-22 11:20:18 +01:00
Daniel
471c3030b7 Rename metric pkg files for clarity 2021-12-21 17:31:05 +01:00
Daniel
50212d7596 Improve metrics pkg and fix linter warnings 2021-12-21 17:29:51 +01:00
Daniel
b304e88e79 Add logging metrics 2021-12-21 17:29:01 +01:00
Daniel
a509febd48 Add fetching counter metric type 2021-12-21 17:28:21 +01:00
Daniel
370609c091 Add host metrics 2021-12-17 22:16:39 +01:00
Daniel
698afba091
Merge pull request #148 from safing/fix/wrapper-format
Fix wrapper dsd format
2021-11-30 09:26:36 +01:00
Daniel
3a92993600 Fix wrapper dsd format 2021-11-29 23:03:03 +01:00
Daniel
b956aba66e Update deps 2021-11-29 16:22:52 +01:00
Daniel
33f9a2596b
Merge pull request #147 from safing/fix/cors
Fix CORS headers
2021-11-29 16:14:13 +01:00
Daniel
cbd9d9a8ae Fix dsd http interface 2021-11-29 11:58:00 +01:00
Daniel
1695420b0e Fix CORS handling 2021-11-28 23:48:45 +01:00
Patrick Pacher
b3dd9a1b3f
Merge pull request #146 from safing/feature/dsd-improvements
Improve DSD and API
2021-11-26 08:55:04 +01:00
Daniel
644947a9c9 Implement review suggestions 2021-11-24 16:37:09 +01:00
Daniel
08016b16cd Add Read/WriteMethod field to api.Endpoint 2021-11-24 16:36:39 +01:00
Daniel
93ff8eb19a Improve dev mode API security 2021-11-23 12:58:01 +01:00
Daniel
140389d142 Add option to supply HTTP status codes with API errors 2021-11-23 12:57:16 +01:00
Daniel
8c758e7e52 Improve endpoint metadata 2021-11-23 12:56:30 +01:00
Daniel
462570f0c9 Delete copy of dsd formats in database.record package 2021-11-23 12:56:13 +01:00
Daniel
40b25a0af7 Add support for dsd formats in HTTP 2021-11-23 11:24:52 +01:00
Daniel
560b96a825 Revert back to dsd format constant typing 2021-11-23 11:24:32 +01:00
Daniel
6cde860324 Add support for MsgPack dsd format 2021-11-22 14:49:50 +01:00
Daniel
8813102b7b Add support for RAW dsd format. 2021-11-22 14:49:16 +01:00
Daniel
601dbffa4f Make dsd formats stronger typed, return parsed format, remove STRING and BYTES format 2021-11-21 23:18:52 +01:00
Daniel
7de63b0c18 Remove BSON format 2021-11-21 23:17:52 +01:00
Daniel
e55fe85da3 Update deps 2021-11-17 16:16:14 +01:00
Daniel
bb456a2953
Merge pull request #145 from safing/fix/patch-set-4
Improve error handling
2021-11-17 16:11:46 +01:00
Daniel
28942e4232 Add module name to module error 2021-11-17 14:24:47 +01:00
Daniel
1dfba1d596 Report module mgmt errors as module error messages 2021-11-17 14:24:26 +01:00
Daniel
c2f77b0cb4 Add dsd CBOR support 2021-11-17 14:23:47 +01:00
Daniel
52d83f6edd Add missing newlines to api ActionFunc messages 2021-11-17 14:23:08 +01:00
Patrick Pacher
d717d01197
Merge pull request #142 from safing/fix/runtime-sr-provider
Fix runtime single record provider key check
2021-11-15 10:36:05 +01:00
Patrick Pacher
ca97747576
Merge pull request #143 from safing/feature/withdraw-injected-dbs
Add Withdraw function to database controllers
2021-11-15 10:35:14 +01:00
Patrick Pacher
bdefc102e3
Merge pull request #144 from safing/fix/patch-set-3
Minor fixes #3
2021-11-15 10:34:18 +01:00
Daniel
08da6e1d7c Improve logging of updated update indexes 2021-11-09 10:11:58 +01:00
Daniel
daed152e6d Update expertise level option names 2021-11-09 10:11:38 +01:00
Daniel
22cd112eaf Fix api endpoint description 2021-11-09 10:11:12 +01:00
Daniel
f57c4a4874 Fix database api serialization warnings 2021-11-09 10:10:53 +01:00
Daniel
95fbe85ba2 Add Withdraw function to database controllers 2021-11-09 10:09:30 +01:00
Daniel
0dcc397bf3 Fix runtime single record provider key check 2021-11-09 10:07:02 +01:00
Daniel
b50f922fb5 Update release level config 2021-10-15 16:39:47 +02:00
Patrick Pacher
ec0a3a6903
Merge pull request #141 from safing/fix/patch-set-2
Fix failure updating during module shutdown
2021-10-14 20:21:16 +02:00
Daniel
a92d8f47f0 Revert testing code 2021-10-14 16:00:15 +02:00
Daniel
7bb9c48bb0 Update dependencies 2021-10-14 14:20:08 +02:00
Daniel
3597a6900e Fix failure updating during module shutdown 2021-10-14 14:11:27 +02:00
Daniel
9a76cf153d
Merge pull request #139 from safing/feature/shutdown-hook-fn
Add global shutdown hook function to modules
2021-10-11 13:48:14 +02:00
Daniel
f7eafb9674
Merge pull request #138 from safing/fix/dir-structure-permissions
Force permissions when creating directories
2021-10-11 13:47:36 +02:00
Daniel
e7ae898ddb Add global shutdown hook function to modules 2021-10-07 14:23:49 +02:00
Daniel
02a62c1126 Force permissions when creating directories 2021-10-07 11:53:04 +02:00
245 changed files with 7044 additions and 2990 deletions

.github/dependabot.yml (new file, 11 lines)

@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"

.github/label-actions.yml (new file, 40 lines)

@ -0,0 +1,40 @@
# Configuration for Label Actions - https://github.com/dessant/label-actions
community support:
comment: |
Hey @{issue-author}, thank you for raising this issue with us.
After a first review we noticed that this does not seem to be a technical issue, but rather a configuration issue or general question about how Portmaster works.
Thus, we invite the community to help with configuration and/or answering this questions.
If you are in a hurry or haven't received an answer, a good place to ask is in [our Discord community](https://discord.gg/safing).
If your problem or question has been resolved or answered, please come back and give an update here for other users encountering the same and then close this issue.
If you are a paying subscriber and want this issue to be checked out by Safing, please send us a message [on Discord](https://discord.gg/safing) or [via Email](mailto:support@safing.io) with your username and the link to this issue, so we can prioritize accordingly.
needs debug info:
comment: |
Hey @{issue-author}, thank you for raising this issue with us.
After a first review we noticed that we will require the Debug Info for further investigation. However, you haven't supplied any Debug Info in your report.
Please [collect Debug Info](https://wiki.safing.io/en/FAQ/DebugInfo) from Portmaster _while_ the reported issue is present.
in/compatibility:
comment: |
Hey @{issue-author}, thank you for reporting on a compatibility.
We keep a list of compatible software and user provided guides for improving compatibility [in the wiki - please have a look there](https://wiki.safing.io/en/Portmaster/App/Compatibility).
If you can't find your software in the list, then a good starting point is our guide on [How do I make software compatible with Portmaster](https://wiki.safing.io/en/FAQ/MakeSoftwareCompatibleWithPortmaster).
If you have managed to establish compatibility with an application, please share your findings here. This will greatly help other users encountering the same issues.
fixed:
comment: |
This issue has been fixed by the recently referenced commit or PR.
However, the fix is not released yet.
It is expected to go into the [Beta Release Channel](https://wiki.safing.io/en/FAQ/SwitchReleaseChannel) for testing within the next two weeks and will be available for everyone within the next four weeks. While this is the typical timeline we work with, things are subject to change.


@ -15,73 +15,41 @@ jobs:
name: Linter
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Check out code
uses: actions/checkout@v3
- uses: actions/setup-go@v2
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '^1.15'
# nektos/act does not have sudo install but we need it on GH actions so
# try to install it.
- name: Install sudo
run: bash -c "apt-get update || true ; apt-get install sudo || true"
env:
DEBIAN_FRONTEND: noninteractive
- name: Install git and gcc
run: sudo bash -c "apt-get update && apt-get install -y git gcc libc6-dev"
env:
DEBIAN_FRONTEND: noninteractive
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2
with:
version: v1.29
only-new-issues: true
args: -c ./.golangci.yml
skip-go-installation: true
go-version: '^1.21'
- name: Get dependencies
run: go mod download
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.52.2
only-new-issues: true
args: -c ./.golangci.yml --timeout 15m
- name: Run go vet
run: go vet ./...
- name: Install golint
run: bash -c "GOBIN=$(pwd) go get -u golang.org/x/lint/golint"
- name: Run golint
run: ./golint -set_exit_status -min_confidence 1.0 ./...
- name: Run gofmt
run: bash -c "test -z $(gofmt -s -l .)"
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Check out code
uses: actions/checkout@v3
- uses: actions/setup-go@v2
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '^1.15'
# nektos/act does not have sudo install but we need it on GH actions so
# try to install it.
- name: Install sudo
run: bash -c "apt-get update || true ; apt-get install sudo || true"
env:
DEBIAN_FRONTEND: noninteractive
- name: Install git and gcc
run: sudo bash -c "apt-get update && apt-get install -y git gcc libc6-dev"
env:
DEBIAN_FRONTEND: noninteractive
go-version: '^1.21'
- name: Get dependencies
run: go mod download
- name: Test
- name: Run tests
run: ./test --test-only


@ -0,0 +1,26 @@
# This workflow responds to first time posters with a greeting message.
# Docs: https://github.com/actions/first-interaction
name: Greet New Users
# This workflow is triggered when a new issue is created.
on:
issues:
types: opened
permissions:
contents: read
issues: write
jobs:
greet:
runs-on: ubuntu-latest
steps:
- uses: actions/first-interaction@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# Respond to first time issue raisers.
issue-message: |
Greetings and welcome to our community! As this is the first issue you opened here, we wanted to share some useful infos with you:
- 🗣️ Our community on [Discord](https://discord.gg/safing) is super helpful and active. We also have an AI-enabled support bot that knows Portmaster well and can give you immediate help.
- 📖 The [Wiki](https://wiki.safing.io/) answers all common questions and has many important details. If you can't find an answer there, let us know, so we can add anything that's missing.


@ -0,0 +1,22 @@
# This workflow responds with a message when certain labels are added to an issue or PR.
# Docs: https://github.com/dessant/label-actions
name: Label Actions
# This workflow is triggered when a label is added to an issue.
on:
issues:
types: labeled
permissions:
contents: read
issues: write
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/label-actions@v3
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
config-path: ".github/label-actions.yml"
process-only: "issues"

.github/workflows/issues-stale.yml (new file, 42 lines)

@ -0,0 +1,42 @@
# This workflow warns and then closes stale issues and PRs.
# Docs: https://github.com/actions/stale
name: Close Stale Issues
on:
schedule:
- cron: "17 5 * * 1-5" # run at 5:17 (UTC) on Monday to Friday
workflow_dispatch:
permissions:
contents: read
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# Increase max operations.
# When using GITHUB_TOKEN, the rate limit is 1,000 requests per hour per repository.
operations-per-run: 500
# Handle stale issues
stale-issue-label: 'stale'
# Exemptions
exempt-all-issue-assignees: true
exempt-issue-labels: 'support,dependencies,pinned,security'
# Mark as stale
days-before-issue-stale: 63 # 2 months / 9 weeks
stale-issue-message: |
This issue has been automatically marked as inactive because it has not had activity in the past two months.
If no further activity occurs, this issue will be automatically closed in one week in order to increase our focus on active topics.
# Close
days-before-issue-close: 7 # 1 week
close-issue-message: |
This issue has been automatically closed because it has not had recent activity. Thank you for your contributions.
If the issue has not been resolved, you can [find more information in our Wiki](https://wiki.safing.io/) or [continue the conversation on our Discord](https://discord.gg/safing).
# TODO: Handle stale PRs
days-before-pr-stale: 36500 # 100 years - effectively disabled.

.gitignore (2 lines changed)

@ -4,3 +4,5 @@ misc
go.mod.*
vendor
go.work
go.work.sum


@ -1,19 +1,72 @@
# Docs:
# https://golangci-lint.run/usage/linters/
linters:
enable-all: true
disable:
- lll
- gochecknoinits
- gochecknoglobals
- containedctx
- contextcheck
- cyclop
- depguard
- exhaustivestruct
- exhaustruct
- forbidigo
- funlen
- whitespace
- wsl
- gomnd
- gochecknoglobals
- gochecknoinits
- gocognit
- gocyclo
- goerr113
- gomnd
- ifshort
- interfacebloat
- interfacer
- ireturn
- lll
- musttag
- nestif
- nilnil
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosnakecase
- revive
- tagliatelle
- testpackage
- varnamelen
- whitespace
- wrapcheck
- wsl
linters-settings:
revive:
# See https://github.com/mgechev/revive#available-rules for details.
enable-all-rules: true
gci:
# put imports beginning with prefix after 3rd-party packages;
# only support one prefix
# if not set, use goimports.local-prefixes
local-prefixes: github.com/safing
godox:
# report any comments starting with keywords, this is useful for TODO or FIXME comments that
# might be left in the code accidentally and should be resolved before merging
keywords:
- FIXME
gosec:
# To specify a set of rules to explicitly exclude.
# Available rules: https://github.com/securego/gosec#available-rules
excludes:
- G204 # Variables in commands.
- G304 # Variables in file paths.
- G505 # We need crypto/sha1 for non-security stuff. Using `nolint:` triggers another linter.
issues:
exclude-use-default: false
exclude-rules:
- text: "a blank import .*"
linters:
- golint
- text: "ST1000: at least one file in a package should have a package comment.*"
linters:
- stylecheck


@ -37,6 +37,7 @@ type endpointBridgeStorage struct {
storage.InjectBase
}
// EndpointBridgeRequest holds a bridged request API request.
type EndpointBridgeRequest struct {
record.Base
sync.Mutex
@ -48,6 +49,7 @@ type EndpointBridgeRequest struct {
MimeType string
}
// EndpointBridgeResponse holds a bridged request API response.
type EndpointBridgeResponse struct {
record.Base
sync.Mutex


@ -13,9 +13,9 @@ import (
"github.com/tevino/abool"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/config"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/rng"
)
@ -84,8 +84,9 @@ type AuthenticatorFunc func(r *http.Request, s *http.Server) (*AuthToken, error)
// later. Functions may be called at any time.
// The Write permission implicitly also includes reading.
type AuthToken struct {
Read Permission
Write Permission
Read Permission
Write Permission
ValidUntil *time.Time
}
type session struct {
@ -133,16 +134,13 @@ func SetAuthenticator(fn AuthenticatorFunc) error {
return nil
}
func authenticateRequest(w http.ResponseWriter, r *http.Request, targetHandler http.Handler) *AuthToken {
func authenticateRequest(w http.ResponseWriter, r *http.Request, targetHandler http.Handler, readMethod bool) *AuthToken {
tracer := log.Tracer(r.Context())
// Check if request is read only.
readRequest := isReadMethod(r.Method)
// Get required permission for target handler.
requiredPermission := PermitSelf
if authdHandler, ok := targetHandler.(AuthenticatedHandler); ok {
if readRequest {
if readMethod {
requiredPermission = authdHandler.ReadPermission(r)
} else {
requiredPermission = authdHandler.WritePermission(r)
@ -150,10 +148,10 @@ func authenticateRequest(w http.ResponseWriter, r *http.Request, targetHandler h
}
// Check if we need to do any authentication at all.
switch requiredPermission {
switch requiredPermission { //nolint:exhaustive
case NotFound:
// Not found.
tracer.Trace("api: authenticated handler reported: not found")
tracer.Debug("api: no API endpoint registered for this path")
http.Error(w, "Not found.", http.StatusNotFound)
return nil
case NotSupported:
@ -200,7 +198,7 @@ func authenticateRequest(w http.ResponseWriter, r *http.Request, targetHandler h
// Get effective permission for request.
var requestPermission Permission
if readRequest {
if readMethod {
requestPermission = token.Read
} else {
requestPermission = token.Write
@ -221,7 +219,10 @@ func authenticateRequest(w http.ResponseWriter, r *http.Request, targetHandler h
if requestPermission < requiredPermission {
// If the token is strictly public, return an authentication request.
if token.Read == PermitAnyone && token.Write == PermitAnyone {
w.Header().Set("WWW-Authenticate", "Bearer realm=Portmaster API")
w.Header().Set(
"WWW-Authenticate",
`Bearer realm="Portmaster API" domain="/"`,
)
http.Error(w, "Authorization required.", http.StatusUnauthorized)
return nil
}
@ -341,6 +342,12 @@ func checkAPIKey(r *http.Request) *AuthToken {
return nil
}
// Abort if the token is expired.
if token.ValidUntil != nil && time.Now().After(*token.ValidUntil) {
log.Tracer(r.Context()).Warningf("api: denying api access from %s using expired token", r.RemoteAddr)
return nil
}
return token
}
@ -355,15 +362,26 @@ func updateAPIKeys(_ context.Context, _ interface{}) error {
delete(apiKeys, k)
}
// whether or not we found expired API keys that should be removed
// from the setting
hasExpiredKeys := false
// a list of valid API keys. Used when hasExpiredKeys is set to true.
// in that case we'll update the setting to only contain validAPIKeys
validAPIKeys := []string{}
// Parse new keys.
for _, key := range configuredAPIKeys() {
u, err := url.Parse(key)
if err != nil {
log.Errorf("api: failed to parse configured API key %s: %s", key, err)
continue
}
if u.Path == "" {
log.Errorf("api: malformed API key %s: missing path section", key)
continue
}
@ -390,8 +408,40 @@ func updateAPIKeys(_ context.Context, _ interface{}) error {
}
token.Write = writePermission
expireStr := q.Get("expires")
if expireStr != "" {
validUntil, err := time.Parse(time.RFC3339, expireStr)
if err != nil {
log.Errorf("api: invalid API key %s: %s", key, err)
continue
}
// continue to the next token if this one is already invalid
if time.Now().After(validUntil) {
// mark the key as expired so we'll remove it from the setting afterwards
hasExpiredKeys = true
continue
}
token.ValidUntil = &validUntil
}
// Save token.
apiKeys[u.Path] = token
validAPIKeys = append(validAPIKeys, key)
}
if hasExpiredKeys {
module.StartLowPriorityMicroTask("api key cleanup", 0, func(ctx context.Context) error {
if err := config.SetConfigOption(CfgAPIKeys, validAPIKeys); err != nil {
log.Errorf("api: failed to remove expired API keys: %s", err)
} else {
log.Infof("api: removed expired API keys from %s", CfgAPIKeys)
}
return nil
})
}
return nil
@ -477,12 +527,24 @@ func deleteSession(sessionKey string) {
delete(sessions, sessionKey)
}
func isReadMethod(method string) bool {
func getEffectiveMethod(r *http.Request) (eMethod string, readMethod bool, ok bool) {
method := r.Method
// Get CORS request method if OPTIONS request.
if r.Method == http.MethodOptions {
method = r.Header.Get("Access-Control-Request-Method")
if method == "" {
return "", false, false
}
}
switch method {
case http.MethodGet, http.MethodHead, http.MethodOptions:
return true
case http.MethodGet, http.MethodHead:
return http.MethodGet, true, true
case http.MethodPost, http.MethodPut, http.MethodDelete:
return method, false, true
default:
return false
return "", false, false
}
}
@ -531,6 +593,8 @@ func (p Permission) Role() string {
return "Admin"
case PermitSelf:
return "Self"
case Dynamic, NotFound, NotSupported:
return "Invalid"
default:
return "Invalid"
}
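For context, a minimal, self-contained sketch of how an API key entry in the documented format `<key>?read=<perm>&write=<perm>` with the `expires` field added in PR #168 might be parsed, mirroring the updateAPIKeys logic shown above. The key string, permission values, and timestamp are hypothetical examples, not values from this diff.

package main

import (
	"fmt"
	"net/url"
	"time"
)

// Hypothetical example key in the documented format:
// <key>?read=<perm>&write=<perm>&expires=<RFC3339 timestamp>
const exampleKey = "examplesecret?read=user&write=admin&expires=2025-01-01T00:00:00Z"

func main() {
	u, err := url.Parse(exampleKey)
	if err != nil {
		fmt.Println("failed to parse key:", err)
		return
	}

	q := u.Query()
	fmt.Println("key path:", u.Path)           // the secret part of the key
	fmt.Println("read perm:", q.Get("read"))   // e.g. "user"
	fmt.Println("write perm:", q.Get("write")) // e.g. "admin"

	// The expires field is an RFC3339 timestamp; expired keys are skipped
	// and later removed from the setting by the cleanup task shown above.
	if expireStr := q.Get("expires"); expireStr != "" {
		validUntil, err := time.Parse(time.RFC3339, expireStr)
		if err != nil {
			fmt.Println("invalid expires value:", err)
			return
		}
		if time.Now().After(validUntil) {
			fmt.Println("key is expired")
			return
		}
		fmt.Println("key valid until:", validUntil)
	}
}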


@ -9,9 +9,7 @@ import (
"github.com/stretchr/testify/assert"
)
var (
testToken = new(AuthToken)
)
var testToken = new(AuthToken)
func testAuthenticator(r *http.Request, s *http.Server) (*AuthToken, error) {
switch {
@ -65,7 +63,9 @@ func init() {
}
}
func TestPermissions(t *testing.T) { //nolint:gocognit
func TestPermissions(t *testing.T) {
t.Parallel()
testHandler := &mainHandler{
mux: mainMux,
}
@ -99,10 +99,11 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
http.MethodHead,
http.MethodPost,
http.MethodPut,
http.MethodDelete,
} {
// Set request permission for test requests.
reading := isReadMethod(method)
_, reading, _ := getEffectiveMethod(&http.Request{Method: method})
if reading {
testToken.Read = requestPerm
testToken.Write = NotSupported
@ -147,7 +148,6 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
}
if expectSuccess {
// Test for success.
if !assert.HTTPBodyContains(
t,
@ -164,9 +164,7 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
handlerPerm, handlerPerm,
)
}
} else {
// Test for error.
if !assert.HTTPError(t,
testHandler.ServeHTTP,
@ -181,7 +179,6 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
handlerPerm, handlerPerm,
)
}
}
}
}
@ -189,6 +186,8 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
}
func TestPermissionDefinitions(t *testing.T) {
t.Parallel()
if NotSupported != 0 {
t.Fatalf("NotSupported must be zero, was %v", NotSupported)
}


@ -5,9 +5,9 @@ import (
"sync"
"time"
"github.com/safing/portbase/log"
"github.com/tevino/abool"
"github.com/safing/portbase/log"
)
const (


@ -25,6 +25,4 @@ const (
apiSeperator = "|"
)
var (
apiSeperatorBytes = []byte(apiSeperator)
)
var apiSeperatorBytes = []byte(apiSeperator)


@ -4,15 +4,14 @@ import (
"bytes"
"errors"
"github.com/tevino/abool"
"github.com/safing/portbase/container"
"github.com/safing/portbase/formats/dsd"
"github.com/tevino/abool"
)
// Client errors.
var (
ErrMalformedMessage = errors.New("malformed message")
)
// ErrMalformedMessage is returned when a malformed message was encountered.
var ErrMalformedMessage = errors.New("malformed message")
// Message is an API message.
type Message struct {


@ -4,10 +4,10 @@ import (
"fmt"
"sync"
"github.com/safing/portbase/log"
"github.com/gorilla/websocket"
"github.com/tevino/abool"
"github.com/gorilla/websocket"
"github.com/safing/portbase/log"
)
type wsState struct {
@ -41,7 +41,7 @@ func (c *Client) wsConnect() error {
case <-c.shutdownSignal:
state.Error("")
}
state.wsConn.Close()
_ = state.wsConn.Close()
state.wg.Wait()
return nil


@ -64,7 +64,8 @@ func registerConfig() error {
err = config.Register(&config.Option{
Name: "API Keys",
Key: CfgAPIKeys,
Description: "Define API keys for priviledged access to the API. Every entry is a separate API key with respective permissions. Format is `<key>?read=<perm>&write=<perm>`. Permissions are `anyone`, `user` and `admin`, and may be omitted.",
Description: "Define API keys for privileged access to the API. Every entry is a separate API key with respective permissions. Format is `<key>?read=<perm>&write=<perm>`. Permissions are `anyone`, `user` and `admin`, and may be omitted.",
Sensitive: true,
OptType: config.OptTypeStringArray,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelStable,


@ -8,19 +8,18 @@ import (
"net/http"
"sync"
"github.com/tidwall/sjson"
"github.com/safing/portbase/database/iterator"
"github.com/safing/portbase/formats/varint"
"github.com/gorilla/websocket"
"github.com/tevino/abool"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
"github.com/safing/portbase/container"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/iterator"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/formats/varint"
"github.com/safing/portbase/log"
)
@ -45,7 +44,7 @@ var (
func init() {
RegisterHandler("/api/database/v1", WrapInAuthHandler(
startDatabaseAPI,
startDatabaseWebsocketAPI,
// Default to admin read/write permissions until the database gets support
// for api permissions.
dbCompatibilityPermission,
@ -53,11 +52,8 @@ func init() {
))
}
// DatabaseAPI is a database API instance.
// DatabaseAPI is a generic database API interface.
type DatabaseAPI struct {
conn *websocket.Conn
sendQueue chan []byte
queriesLock sync.Mutex
queries map[string]*iterator.Iterator
@ -67,14 +63,35 @@ type DatabaseAPI struct {
shutdownSignal chan struct{}
shuttingDown *abool.AtomicBool
db *database.Interface
sendBytes func(data []byte)
}
// DatabaseWebsocketAPI is a database websocket API interface.
type DatabaseWebsocketAPI struct {
DatabaseAPI
sendQueue chan []byte
conn *websocket.Conn
}
func allowAnyOrigin(r *http.Request) bool {
return true
}
func startDatabaseAPI(w http.ResponseWriter, r *http.Request) {
// CreateDatabaseAPI creates a new database interface.
func CreateDatabaseAPI(sendFunction func(data []byte)) DatabaseAPI {
return DatabaseAPI{
queries: make(map[string]*iterator.Iterator),
subs: make(map[string]*database.Subscription),
shutdownSignal: make(chan struct{}),
shuttingDown: abool.NewBool(false),
db: database.NewInterface(nil),
sendBytes: sendFunction,
}
}
func startDatabaseWebsocketAPI(w http.ResponseWriter, r *http.Request) {
upgrader := websocket.Upgrader{
CheckOrigin: allowAnyOrigin,
ReadBufferSize: 1024,
@ -84,28 +101,104 @@ func startDatabaseAPI(w http.ResponseWriter, r *http.Request) {
if err != nil {
errMsg := fmt.Sprintf("could not upgrade: %s", err)
log.Error(errMsg)
http.Error(w, errMsg, 400)
http.Error(w, errMsg, http.StatusBadRequest)
return
}
new := &DatabaseAPI{
conn: wsConn,
sendQueue: make(chan []byte, 100),
queries: make(map[string]*iterator.Iterator),
subs: make(map[string]*database.Subscription),
shutdownSignal: make(chan struct{}),
shuttingDown: abool.NewBool(false),
db: database.NewInterface(nil),
newDBAPI := &DatabaseWebsocketAPI{
DatabaseAPI: DatabaseAPI{
queries: make(map[string]*iterator.Iterator),
subs: make(map[string]*database.Subscription),
shutdownSignal: make(chan struct{}),
shuttingDown: abool.NewBool(false),
db: database.NewInterface(nil),
},
sendQueue: make(chan []byte, 100),
conn: wsConn,
}
module.StartWorker("database api handler", new.handler)
module.StartWorker("database api writer", new.writer)
newDBAPI.sendBytes = func(data []byte) {
newDBAPI.sendQueue <- data
}
module.StartWorker("database api handler", newDBAPI.handler)
module.StartWorker("database api writer", newDBAPI.writer)
log.Tracer(r.Context()).Infof("api request: init websocket %s %s", r.RemoteAddr, r.RequestURI)
}
func (api *DatabaseAPI) handler(context.Context) error {
func (api *DatabaseWebsocketAPI) handler(context.Context) error {
defer func() {
_ = api.shutdown(nil)
}()
for {
_, msg, err := api.conn.ReadMessage()
if err != nil {
return api.shutdown(err)
}
api.Handle(msg)
}
}
func (api *DatabaseWebsocketAPI) writer(ctx context.Context) error {
defer func() {
_ = api.shutdown(nil)
}()
var data []byte
var err error
for {
select {
// prioritize direct writes
case data = <-api.sendQueue:
if len(data) == 0 {
return nil
}
case <-ctx.Done():
return nil
case <-api.shutdownSignal:
return nil
}
// log.Tracef("api: sending %s", string(*msg))
err = api.conn.WriteMessage(websocket.BinaryMessage, data)
if err != nil {
return api.shutdown(err)
}
}
}
func (api *DatabaseWebsocketAPI) shutdown(err error) error {
// Check if we are the first to shut down.
if !api.shuttingDown.SetToIf(false, true) {
return nil
}
// Check the given error.
if err != nil {
if websocket.IsCloseError(err,
websocket.CloseNormalClosure,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
) {
log.Infof("api: websocket connection to %s closed", api.conn.RemoteAddr())
} else {
log.Warningf("api: websocket connection error with %s: %s", api.conn.RemoteAddr(), err)
}
}
// Trigger shutdown.
close(api.shutdownSignal)
_ = api.conn.Close()
return nil
}
// Handle handles a message for the database API.
func (api *DatabaseAPI) Handle(msg []byte) {
// 123|get|<key>
// 123|ok|<key>|<data>
// 123|error|<message>
@ -144,120 +237,62 @@ func (api *DatabaseAPI) handler(context.Context) error {
// 131|success
// 131|error|<message>
for {
parts := bytes.SplitN(msg, []byte("|"), 3)
_, msg, err := api.conn.ReadMessage()
if err != nil {
return api.shutdown(err)
}
// Handle special command "cancel"
if len(parts) == 2 && string(parts[1]) == "cancel" {
// 124|cancel
// 125|cancel
// 127|cancel
go api.handleCancel(parts[0])
return
}
parts := bytes.SplitN(msg, []byte("|"), 3)
if len(parts) != 3 {
api.send(nil, dbMsgTypeError, "bad request: malformed message", nil)
return
}
// Handle special command "cancel"
if len(parts) == 2 && string(parts[1]) == "cancel" {
// 124|cancel
// 125|cancel
// 127|cancel
go api.handleCancel(parts[0])
continue
}
if len(parts) != 3 {
switch string(parts[1]) {
case "get":
// 123|get|<key>
go api.handleGet(parts[0], string(parts[2]))
case "query":
// 124|query|<query>
go api.handleQuery(parts[0], string(parts[2]))
case "sub":
// 125|sub|<query>
go api.handleSub(parts[0], string(parts[2]))
case "qsub":
// 127|qsub|<query>
go api.handleQsub(parts[0], string(parts[2]))
case "create", "update", "insert":
// split key and payload
dataParts := bytes.SplitN(parts[2], []byte("|"), 2)
if len(dataParts) != 2 {
api.send(nil, dbMsgTypeError, "bad request: malformed message", nil)
continue
return
}
switch string(parts[1]) {
case "get":
// 123|get|<key>
go api.handleGet(parts[0], string(parts[2]))
case "query":
// 124|query|<query>
go api.handleQuery(parts[0], string(parts[2]))
case "sub":
// 125|sub|<query>
go api.handleSub(parts[0], string(parts[2]))
case "qsub":
// 127|qsub|<query>
go api.handleQsub(parts[0], string(parts[2]))
case "create", "update", "insert":
// split key and payload
dataParts := bytes.SplitN(parts[2], []byte("|"), 2)
if len(dataParts) != 2 {
api.send(nil, dbMsgTypeError, "bad request: malformed message", nil)
continue
}
switch string(parts[1]) {
case "create":
// 128|create|<key>|<data>
go api.handlePut(parts[0], string(dataParts[0]), dataParts[1], true)
case "update":
// 129|update|<key>|<data>
go api.handlePut(parts[0], string(dataParts[0]), dataParts[1], false)
case "insert":
// 130|insert|<key>|<data>
go api.handleInsert(parts[0], string(dataParts[0]), dataParts[1])
}
case "delete":
// 131|delete|<key>
go api.handleDelete(parts[0], string(parts[2]))
default:
api.send(parts[0], dbMsgTypeError, "bad request: unknown method", nil)
case "create":
// 128|create|<key>|<data>
go api.handlePut(parts[0], string(dataParts[0]), dataParts[1], true)
case "update":
// 129|update|<key>|<data>
go api.handlePut(parts[0], string(dataParts[0]), dataParts[1], false)
case "insert":
// 130|insert|<key>|<data>
go api.handleInsert(parts[0], string(dataParts[0]), dataParts[1])
}
case "delete":
// 131|delete|<key>
go api.handleDelete(parts[0], string(parts[2]))
default:
api.send(parts[0], dbMsgTypeError, "bad request: unknown method", nil)
}
}
func (api *DatabaseAPI) writer(ctx context.Context) error {
var data []byte
var err error
for {
select {
// prioritize direct writes
case data = <-api.sendQueue:
if len(data) == 0 {
return api.shutdown(nil)
}
case <-ctx.Done():
return api.shutdown(nil)
case <-api.shutdownSignal:
return api.shutdown(nil)
}
// log.Tracef("api: sending %s", string(*msg))
err = api.conn.WriteMessage(websocket.BinaryMessage, data)
if err != nil {
return api.shutdown(err)
}
}
}
func (api *DatabaseAPI) shutdown(err error) error {
// Check if we are the first to shut down.
if !api.shuttingDown.SetToIf(false, true) {
return nil
}
// Check the given error.
if err != nil {
if websocket.IsCloseError(err,
websocket.CloseNormalClosure,
websocket.CloseGoingAway,
websocket.CloseAbnormalClosure,
) {
log.Infof("api: websocket connection to %s closed", api.conn.RemoteAddr())
} else {
log.Warningf("api: websocket connection error with %s: %s", api.conn.RemoteAddr(), err)
}
}
// Trigger shutdown.
close(api.shutdownSignal)
api.conn.Close()
return nil
}
func (api *DatabaseAPI) send(opID []byte, msgType string, msgOrKey string, data []byte) {
c := container.New(opID)
c.Append(dbAPISeperatorBytes)
@ -273,7 +308,7 @@ func (api *DatabaseAPI) send(opID []byte, msgType string, msgOrKey string, data
c.Append(data)
}
api.sendQueue <- c.CompileData()
api.sendBytes(c.CompileData())
}
func (api *DatabaseAPI) handleGet(opID []byte, key string) {
@ -285,7 +320,7 @@ func (api *DatabaseAPI) handleGet(opID []byte, key string) {
r, err := api.db.Get(key)
if err == nil {
data, err = marshalRecord(r, true)
data, err = MarshalRecord(r, true)
}
if err != nil {
api.send(opID, dbMsgTypeError, err.Error(), nil)
@ -338,14 +373,15 @@ func (api *DatabaseAPI) processQuery(opID []byte, q *query.Query) (ok bool) {
case <-api.shutdownSignal:
// cancel query and return
it.Cancel()
return
return false
case r := <-it.Next:
// process query feed
if r != nil {
// process record
data, err := marshalRecord(r, true)
data, err := MarshalRecord(r, true)
if err != nil {
api.send(opID, dbMsgTypeWarning, err.Error(), nil)
continue
}
api.send(opID, dbMsgTypeOk, r.Key(), data)
} else {
@ -361,7 +397,7 @@ func (api *DatabaseAPI) processQuery(opID []byte, q *query.Query) (ok bool) {
}
}
// func (api *DatabaseAPI) runQuery()
// func (api *DatabaseWebsocketAPI) runQuery()
func (api *DatabaseAPI) handleSub(opID []byte, queryText string) {
// 125|sub|<query>
@ -419,7 +455,7 @@ func (api *DatabaseAPI) processSub(opID []byte, sub *database.Subscription) {
// process sub feed
if r != nil {
// process record
data, err := marshalRecord(r, true)
data, err := MarshalRecord(r, true)
if err != nil {
api.send(opID, dbMsgTypeWarning, err.Error(), nil)
continue
@ -427,12 +463,12 @@ func (api *DatabaseAPI) processSub(opID []byte, sub *database.Subscription) {
// TODO: use upd, new and delete msgTypes
r.Lock()
isDeleted := r.Meta().IsDeleted()
new := r.Meta().Created == r.Meta().Modified
isNew := r.Meta().Created == r.Meta().Modified
r.Unlock()
switch {
case isDeleted:
api.send(opID, dbMsgTypeDel, r.Key(), nil)
case new:
case isNew:
api.send(opID, dbMsgTypeNew, r.Key(), data)
default:
api.send(opID, dbMsgTypeUpd, r.Key(), data)
@ -533,9 +569,9 @@ func (api *DatabaseAPI) handlePut(opID []byte, key string, data []byte, create b
}
// TODO - staged for deletion: remove transition code
// if data[0] != record.JSON {
// if data[0] != dsd.JSON {
// typedData := make([]byte, len(data)+1)
// typedData[0] = record.JSON
// typedData[0] = dsd.JSON
// copy(typedData[1:], data)
// data = typedData
// }
@ -623,20 +659,20 @@ func (api *DatabaseAPI) handleDelete(opID []byte, key string) {
api.send(opID, dbMsgTypeSuccess, emptyString, nil)
}
// marsharlRecords locks and marshals the given record, additionally adding
// MarshalRecord locks and marshals the given record, additionally adding
// metadata and returning it as json.
func marshalRecord(r record.Record, withDSDIdentifier bool) ([]byte, error) {
func MarshalRecord(r record.Record, withDSDIdentifier bool) ([]byte, error) {
r.Lock()
defer r.Unlock()
// Pour record into JSON.
jsonData, err := r.Marshal(r, record.JSON)
jsonData, err := r.Marshal(r, dsd.JSON)
if err != nil {
return nil, err
}
// Remove JSON identifier for manual editing.
jsonData = bytes.TrimPrefix(jsonData, varint.Pack8(record.JSON))
jsonData = bytes.TrimPrefix(jsonData, varint.Pack8(dsd.JSON))
// Add metadata.
jsonData, err = sjson.SetBytes(jsonData, "_meta", r.Meta())
@ -652,7 +688,7 @@ func marshalRecord(r record.Record, withDSDIdentifier bool) ([]byte, error) {
// Add JSON identifier again.
if withDSDIdentifier {
formatID := varint.Pack8(record.JSON)
formatID := varint.Pack8(dsd.JSON)
finalData := make([]byte, 0, len(formatID)+len(jsonData))
finalData = append(finalData, formatID...)
finalData = append(finalData, jsonData...)
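Since marshalRecord is now exported as MarshalRecord, other packages can produce the same JSON-with-metadata representation that the database API sends. A hedged sketch (not part of this diff), mirroring the wrapper pattern used elsewhere in this changeset; key and payload are illustrative.

func exampleMarshal() ([]byte, error) {
    r, err := record.NewWrapper("core:example", nil, dsd.JSON, []byte(`{"Enabled":true}`))
    if err != nil {
        return nil, err
    }
    // Passing true keeps the DSD format identifier prefix so clients can detect the encoding.
    return MarshalRecord(r, true)
}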


@ -1,10 +1,10 @@
package api
import (
"encoding/json"
"bytes"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"sort"
"strconv"
@ -14,6 +14,7 @@ import (
"github.com/gorilla/mux"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
)
@ -21,12 +22,49 @@ import (
// Endpoint describes an API Endpoint.
// Path and at least one permission are required.
// As is exactly one function.
type Endpoint struct {
Path string
MimeType string
Read Permission
Write Permission
BelongsTo *modules.Module
type Endpoint struct { //nolint:maligned
// Name is the human readable name of the endpoint.
Name string
// Description is the human readable description and documentation of the endpoint.
Description string
// Parameters is the parameter documentation.
Parameters []Parameter `json:",omitempty"`
// Path describes the URL path of the endpoint.
Path string
// MimeType defines the content type of the returned data.
MimeType string
// Read defines the required read permission.
Read Permission `json:",omitempty"`
// ReadMethod sets the required read method for the endpoint.
// Available methods are:
// GET: Returns data only, no action is taken, nothing is changed.
// If omitted, defaults to GET.
//
// This field is currently being introduced and will only warn and not deny
// access if the read method does not match.
ReadMethod string `json:",omitempty"`
// Write defines the required write permission.
Write Permission `json:",omitempty"`
// WriteMethod sets the required write method for the endpoint.
// Available methods are:
// POST: Create a new resource; Change a status; Execute a function
// PUT: Update an existing resource
// DELETE: Remove an existing resource
// If omitted, defaults to POST.
//
// This field is currently being introduced and will only warn and not deny
// access if the write method does not match.
WriteMethod string `json:",omitempty"`
// BelongsTo defines which module this endpoint belongs to.
// The endpoint will not be accessible if the module is not online.
BelongsTo *modules.Module `json:"-"`
// ActionFunc is for simple actions with a return message for the user.
ActionFunc ActionFunc `json:"-"`
@ -43,12 +81,6 @@ type Endpoint struct {
// HandlerFunc is the raw http handler.
HandlerFunc http.HandlerFunc `json:"-"`
// Documentation Metadata.
Name string
Description string
Parameters []Parameter
}
// Parameter describes a parameterized variation of an endpoint.
@ -59,6 +91,41 @@ type Parameter struct {
Description string
}
// HTTPStatusProvider is an interface for errors to provide a custom HTTP
// status code.
type HTTPStatusProvider interface {
HTTPStatus() int
}
// HTTPStatusError represents an error with an HTTP status code.
type HTTPStatusError struct {
err error
code int
}
// Error returns the error message.
func (e *HTTPStatusError) Error() string {
return e.err.Error()
}
// Unwrap returns the wrapped error.
func (e *HTTPStatusError) Unwrap() error {
return e.err
}
// HTTPStatus returns the HTTP status code of this error.
func (e *HTTPStatusError) HTTPStatus() int {
return e.code
}
// ErrorWithStatus adds the HTTP status code to the error.
func ErrorWithStatus(err error, code int) error {
return &HTTPStatusError{
err: err,
code: code,
}
}
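Taken together with the new Name, Description, and WriteMethod fields, ErrorWithStatus lets an endpoint control the HTTP status of a failure instead of always returning 500. A hedged registration sketch (not from this changeset); the path, permission, and condition are illustrative.

func registerExampleEndpoint() error {
    return RegisterEndpoint(Endpoint{
        Name:        "Clear Example Cache",
        Description: "Clears a hypothetical cache.",
        Path:        "example/clear-cache",
        Write:       PermitUser,
        WriteMethod: http.MethodPost,
        ActionFunc: func(ar *Request) (string, error) {
            if ar.Request.URL.Query().Get("confirm") != "true" {
                // Surface a specific status instead of the generic 500.
                return "", ErrorWithStatus(errors.New("add ?confirm=true to proceed"), http.StatusBadRequest)
            }
            return "cache cleared", nil
        },
    })
}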
type (
// ActionFunc is for simple actions with a return message for the user.
ActionFunc func(ar *Request) (msg string, err error)
@ -142,7 +209,7 @@ func getAPIContext(r *http.Request) (apiEndpoint *Endpoint, apiRequest *Request)
// does not pass the sanity checks.
func RegisterEndpoint(e Endpoint) error {
if err := e.check(); err != nil {
return fmt.Errorf("%w: %s", ErrInvalidEndpoint, err)
return fmt.Errorf("%w: %w", ErrInvalidEndpoint, err)
}
endpointsLock.Lock()
@ -158,6 +225,18 @@ func RegisterEndpoint(e Endpoint) error {
return nil
}
// GetEndpointByPath returns the endpoint registered with the given path.
func GetEndpointByPath(path string) (*Endpoint, error) {
endpointsLock.Lock()
defer endpointsLock.Unlock()
endpoint, ok := endpoints[path]
if !ok {
return nil, fmt.Errorf("no registered endpoint on path: %q", path)
}
return endpoint, nil
}
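A small usage sketch (assumption, not in this diff) for the new lookup, e.g. to render an endpoint's documentation:

func exampleLookup() {
    if ep, err := GetEndpointByPath("ready"); err == nil {
        fmt.Printf("%s (%s): %s\n", ep.Path, ep.Name, ep.Description)
    }
}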
func (e *Endpoint) check() error {
// Check path.
if strings.TrimSpace(e.Path) == "" {
@ -172,6 +251,36 @@ func (e *Endpoint) check() error {
return errors.New("invalid write permission")
}
// Check methods.
if e.Read != NotSupported {
switch e.ReadMethod {
case http.MethodGet:
// All good.
case "":
// Set to default.
e.ReadMethod = http.MethodGet
default:
return errors.New("invalid read method")
}
} else {
e.ReadMethod = ""
}
if e.Write != NotSupported {
switch e.WriteMethod {
case http.MethodPost,
http.MethodPut,
http.MethodDelete:
// All good.
case "":
// Set to default.
e.WriteMethod = http.MethodPost
default:
return errors.New("invalid write method")
}
} else {
e.WriteMethod = ""
}
// Check functions.
var defaultMimeType string
fnCnt := 0
@ -272,14 +381,45 @@ func (e *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Wait for the owning module to be ready.
if !moduleIsReady(e.BelongsTo) {
http.Error(w, "The API endpoint is not ready yet or the its module is not enabled. Please try again later.", http.StatusServiceUnavailable)
http.Error(w, "The API endpoint is not ready yet or the its module is not enabled. Reload (F5) to try again.", http.StatusServiceUnavailable)
return
}
switch r.Method {
case http.MethodHead:
w.WriteHeader(http.StatusOK)
// Return OPTIONS request before starting to handle normal requests.
if r.Method == http.MethodOptions {
w.WriteHeader(http.StatusNoContent)
return
}
eMethod, readMethod, ok := getEffectiveMethod(r)
if !ok {
http.Error(w, "unsupported method for the actions API", http.StatusMethodNotAllowed)
return
}
if readMethod {
if eMethod != e.ReadMethod {
log.Tracer(r.Context()).Warningf(
"api: method %q does not match required read method %q%s",
r.Method,
e.ReadMethod,
" - this will be an error and abort the request in the future",
)
}
} else {
if eMethod != e.WriteMethod {
log.Tracer(r.Context()).Warningf(
"api: method %q does not match required write method %q%s",
r.Method,
e.WriteMethod,
" - this will be an error and abort the request in the future",
)
}
}
switch eMethod {
case http.MethodGet, http.MethodDelete:
// Nothing to do for these.
case http.MethodPost, http.MethodPut:
// Read body data.
inputData, ok := readBody(w, r)
@ -287,16 +427,18 @@ func (e *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
apiRequest.InputData = inputData
case http.MethodGet:
// Nothing special to do here.
case http.MethodOptions:
w.WriteHeader(http.StatusNoContent)
return
// restore request body for any http.HandlerFunc below
r.Body = io.NopCloser(bytes.NewReader(inputData))
default:
// Defensive.
http.Error(w, "unsupported method for the actions API", http.StatusMethodNotAllowed)
return
}
// Add response headers to request struct so that the endpoint can work with them.
apiRequest.ResponseHeader = w.Header()
// Execute action function and get response data
var responseData []byte
var err error
@ -305,6 +447,9 @@ func (e *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
case e.ActionFunc != nil:
var msg string
msg, err = e.ActionFunc(apiRequest)
if !strings.HasSuffix(msg, "\n") {
msg += "\n"
}
if err == nil {
responseData = []byte(msg)
}
@ -316,14 +461,18 @@ func (e *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var v interface{}
v, err = e.StructFunc(apiRequest)
if err == nil && v != nil {
responseData, err = json.Marshal(v)
var mimeType string
responseData, mimeType, _, err = dsd.MimeDump(v, r.Header.Get("Accept"))
if err == nil {
w.Header().Set("Content-Type", mimeType)
}
}
case e.RecordFunc != nil:
var rec record.Record
rec, err = e.RecordFunc(apiRequest)
if err == nil && r != nil {
responseData, err = marshalRecord(rec, false)
responseData, err = MarshalRecord(rec, false)
}
case e.HandlerFunc != nil:
@ -337,12 +486,27 @@ func (e *Endpoint) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Check for handler error.
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
var statusProvider HTTPStatusProvider
if errors.As(err, &statusProvider) {
http.Error(w, err.Error(), statusProvider.HTTPStatus())
} else {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
return
}
// Return no content if there is none, or if request is HEAD.
if len(responseData) == 0 || r.Method == http.MethodHead {
w.WriteHeader(http.StatusNoContent)
return
}
// Set content type if not yet set.
if w.Header().Get("Content-Type") == "" {
w.Header().Set("Content-Type", e.MimeType+"; charset=utf-8")
}
// Write response.
w.Header().Set("Content-Type", e.MimeType+"; charset=utf-8")
w.Header().Set("Content-Length", strconv.Itoa(len(responseData)))
w.WriteHeader(http.StatusOK)
_, err = w.Write(responseData)
@ -359,7 +523,7 @@ func readBody(w http.ResponseWriter, r *http.Request) (inputData []byte, ok bool
}
// Read and close body.
inputData, err := ioutil.ReadAll(r.Body)
inputData, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, "failed to read body"+err.Error(), http.StatusInternalServerError)
return nil, false


@ -2,11 +2,17 @@ package api
import (
"bytes"
"context"
"errors"
"fmt"
"net/http"
"os"
"runtime/pprof"
"strings"
"time"
"github.com/safing/portbase/info"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/utils/debug"
)
@ -21,6 +27,16 @@ func registerDebugEndpoints() error {
return err
}
if err := RegisterEndpoint(Endpoint{
Path: "ready",
Read: PermitAnyone,
ActionFunc: ready,
Name: "Ready",
Description: "Check if Portmaster has completed starting and is ready.",
}); err != nil {
return err
}
if err := RegisterEndpoint(Endpoint{
Path: "debug/stack",
Read: PermitAnyone,
@ -41,6 +57,58 @@ func registerDebugEndpoints() error {
return err
}
if err := RegisterEndpoint(Endpoint{
Path: "debug/cpu",
MimeType: "application/octet-stream",
Read: PermitAnyone,
DataFunc: handleCPUProfile,
Name: "Get CPU Profile",
Description: strings.ReplaceAll(`Gather and return the CPU profile.
This data needs to be gathered over a period of time, which is specified using the duration parameter.
You can easily view this data in your browser with this command (with Go installed):
"go tool pprof -http :8888 http://127.0.0.1:817/api/v1/debug/cpu"
`, `"`, "`"),
Parameters: []Parameter{{
Method: http.MethodGet,
Field: "duration",
Value: "10s",
Description: "Specify the formatting style. The default is simple markdown formatting.",
}},
}); err != nil {
return err
}
if err := RegisterEndpoint(Endpoint{
Path: "debug/heap",
MimeType: "application/octet-stream",
Read: PermitAnyone,
DataFunc: handleHeapProfile,
Name: "Get Heap Profile",
Description: strings.ReplaceAll(`Gather and return the heap memory profile.
You can easily view this data in your browser with this command (with Go installed):
"go tool pprof -http :8888 http://127.0.0.1:817/api/v1/debug/heap"
`, `"`, "`"),
}); err != nil {
return err
}
if err := RegisterEndpoint(Endpoint{
Path: "debug/allocs",
MimeType: "application/octet-stream",
Read: PermitAnyone,
DataFunc: handleAllocsProfile,
Name: "Get Allocs Profile",
Description: strings.ReplaceAll(`Gather and return the memory allocation profile.
You can easily view this data in your browser with this command (with Go installed):
"go tool pprof -http :8888 http://127.0.0.1:817/api/v1/debug/allocs"
`, `"`, "`"),
}); err != nil {
return err
}
if err := RegisterEndpoint(Endpoint{
Path: "debug/info",
Read: PermitAnyone,
@ -62,9 +130,22 @@ func registerDebugEndpoints() error {
// ping responds with pong.
func ping(ar *Request) (msg string, err error) {
// TODO: Remove upgrade to "ready" when all UI components have transitioned.
if modules.IsStarting() || modules.IsShuttingDown() {
return "", ErrorWithStatus(errors.New("portmaster is not ready, reload (F5) to try again"), http.StatusTooEarly)
}
return "Pong.", nil
}
// ready checks if Portmaster has completed starting.
func ready(ar *Request) (msg string, err error) {
if modules.IsStarting() || modules.IsShuttingDown() {
return "", ErrorWithStatus(errors.New("portmaster is not ready, reload (F5) to try again"), http.StatusTooEarly)
}
return "Portmaster is ready.", nil
}
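On the client side, the new endpoint can be polled until Portmaster reports ready; anything other than 200 (such as 425 Too Early) means keep waiting. A hedged sketch; the port and path prefix are assumed from the defaults mentioned in the profile descriptions above.

func waitUntilReady(ctx context.Context) error {
    for {
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://127.0.0.1:817/api/v1/ready", nil)
        if err != nil {
            return err
        }
        resp, err := http.DefaultClient.Do(req)
        if err == nil {
            _ = resp.Body.Close()
            if resp.StatusCode == http.StatusOK {
                return nil // Portmaster is ready.
            }
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(time.Second):
        }
    }
}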
// getStack returns the current goroutine stack.
func getStack(_ *Request) (data []byte, err error) {
buf := &bytes.Buffer{}
@ -90,6 +171,73 @@ func printStack(_ *Request) (msg string, err error) {
return "stack printed to stdout", nil
}
// handleCPUProfile returns the CPU profile.
func handleCPUProfile(ar *Request) (data []byte, err error) {
// Parse duration.
duration := 10 * time.Second
if durationOption := ar.Request.URL.Query().Get("duration"); durationOption != "" {
parsedDuration, err := time.ParseDuration(durationOption)
if err != nil {
return nil, fmt.Errorf("failed to parse duration: %w", err)
}
duration = parsedDuration
}
// Indicate download and filename.
ar.ResponseHeader.Set(
"Content-Disposition",
fmt.Sprintf(`attachment; filename="portmaster-cpu-profile_v%s.pprof"`, info.Version()),
)
// Start CPU profiling.
buf := new(bytes.Buffer)
if err := pprof.StartCPUProfile(buf); err != nil {
return nil, fmt.Errorf("failed to start cpu profile: %w", err)
}
// Wait for the specified duration.
select {
case <-time.After(duration):
case <-ar.Context().Done():
pprof.StopCPUProfile()
return nil, context.Canceled
}
// Stop CPU profiling and return data.
pprof.StopCPUProfile()
return buf.Bytes(), nil
}
// handleHeapProfile returns the Heap profile.
func handleHeapProfile(ar *Request) (data []byte, err error) {
// Indicate download and filename.
ar.ResponseHeader.Set(
"Content-Disposition",
fmt.Sprintf(`attachment; filename="portmaster-memory-heap-profile_v%s.pprof"`, info.Version()),
)
buf := new(bytes.Buffer)
if err := pprof.Lookup("heap").WriteTo(buf, 0); err != nil {
return nil, fmt.Errorf("failed to write heap profile: %w", err)
}
return buf.Bytes(), nil
}
// handleAllocsProfile returns the Allocs profile.
func handleAllocsProfile(ar *Request) (data []byte, err error) {
// Indicate download and filename.
ar.ResponseHeader.Set(
"Content-Disposition",
fmt.Sprintf(`attachment; filename="portmaster-memory-allocs-profile_v%s.pprof"`, info.Version()),
)
buf := new(bytes.Buffer)
if err := pprof.Lookup("allocs").WriteTo(buf, 0); err != nil {
return nil, fmt.Errorf("failed to write allocs profile: %w", err)
}
return buf.Bytes(), nil
}
// debugInfo returns the debugging information for support requests.
func debugInfo(ar *Request) (data []byte, err error) {
// Create debug information helper.


@ -51,7 +51,6 @@ func registerMetaEndpoints() error {
if err := RegisterEndpoint(Endpoint{
Path: "auth/reset",
Read: PermitAnyone,
Write: PermitAnyone,
HandlerFunc: authReset,
Name: "Reset Authenticated Session",
Description: "Resets authentication status internally and in the browser.",
@ -94,7 +93,10 @@ func authBearer(w http.ResponseWriter, r *http.Request) {
}
// Respond with desired authentication header.
w.Header().Set("WWW-Authenticate", "Bearer realm=Portmaster API")
w.Header().Set(
"WWW-Authenticate",
`Bearer realm="Portmaster API" domain="/"`,
)
http.Error(w, "Authorization required.", http.StatusUnauthorized)
}
@ -107,7 +109,10 @@ func authBasic(w http.ResponseWriter, r *http.Request) {
}
// Respond with desired authentication header.
w.Header().Set("WWW-Authenticate", "Basic realm=Portmaster API")
w.Header().Set(
"WWW-Authenticate",
`Basic realm="Portmaster API" domain="/"`,
)
http.Error(w, "Authorization required.", http.StatusUnauthorized)
}
@ -128,7 +133,7 @@ func authReset(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Clear-Site-Data", "*")
// Set HTTP Auth Realm without requesting authorization.
w.Header().Set("WWW-Authenticate", "None realm=Portmaster API")
w.Header().Set("WWW-Authenticate", `None realm="Portmaster API"`)
// Reply with 401 Unauthorized in order to clear HTTP Basic Auth data.
http.Error(w, "Session deleted.", http.StatusUnauthorized)


@ -3,15 +3,27 @@ package api
import (
"errors"
"fmt"
"github.com/safing/portbase/modules"
)
func registerModulesEndpoints() error {
if err := RegisterEndpoint(Endpoint{
Path: "modules/status",
Read: PermitUser,
StructFunc: getStatusfunc,
Name: "Get Module Status",
Description: "Returns status information of all modules.",
}); err != nil {
return err
}
if err := RegisterEndpoint(Endpoint{
Path: "modules/{moduleName:.+}/trigger/{eventName:.+}",
Write: PermitSelf,
ActionFunc: triggerEvent,
Name: "Export Configuration Options",
Description: "Returns a list of all registered configuration options and their metadata. This does not include the current active or default settings.",
Name: "Trigger Event",
Description: "Triggers an event of an internal module.",
}); err != nil {
return err
}
@ -19,6 +31,14 @@ func registerModulesEndpoints() error {
return nil
}
func getStatusfunc(ar *Request) (i interface{}, err error) {
status := modules.GetStatus()
if status == nil {
return nil, errors.New("modules not yet initialized")
}
return status, nil
}
func triggerEvent(ar *Request) (msg string, err error) {
// Get parameters.
moduleName := ar.URLVars["moduleName"]


@ -5,8 +5,9 @@ import (
"sync"
"testing"
"github.com/safing/portbase/database/record"
"github.com/stretchr/testify/assert"
"github.com/safing/portbase/database/record"
)
const (
@ -21,6 +22,8 @@ type actionTestRecord struct {
}
func TestEndpoints(t *testing.T) {
t.Parallel()
testHandler := &mainHandler{
mux: mainMux,
}
@ -113,6 +116,8 @@ func TestEndpoints(t *testing.T) {
}
func TestActionRegistration(t *testing.T) {
t.Parallel()
assert.Error(t, RegisterEndpoint(Endpoint{}))
assert.Error(t, RegisterEndpoint(Endpoint{


@ -1,7 +1,6 @@
package api
import (
"context"
"encoding/json"
"errors"
"flag"
@ -58,7 +57,7 @@ func prep() error {
}
func start() error {
go Serve()
startServer()
_ = updateAPIKeys(module.Ctx, nil)
err := module.RegisterEventHook("config", "config change", "update API keys", updateAPIKeys)
@ -75,10 +74,7 @@ func start() error {
}
func stop() error {
if server != nil {
return server.Shutdown(context.Background())
}
return nil
return stopServer()
}
func exportEndpointsCmd() error {


@ -2,15 +2,13 @@ package api
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/modules"
// API depends on the database for the database api.
_ "github.com/safing/portbase/database/dbmodule"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/modules"
)
func init() {
@ -22,13 +20,13 @@ func TestMain(m *testing.M) {
module.Enable()
// tmp dir for data root (db & config)
tmpDir, err := ioutil.TempDir("", "portbase-testing-")
tmpDir, err := os.MkdirTemp("", "portbase-testing-")
if err != nil {
fmt.Fprintf(os.Stderr, "failed to create tmp dir: %s\n", err)
os.Exit(1)
}
// initialize data dir
err = dataroot.Initialize(tmpDir, 0755)
err = dataroot.Initialize(tmpDir, 0o0755)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to initialize data root: %s\n", err)
os.Exit(1)
@ -53,6 +51,6 @@ func TestMain(m *testing.M) {
fmt.Fprintf(os.Stderr, "failed to cleanly shutdown test: %s\n", err)
}
// clean up and exit
os.RemoveAll(tmpDir)
_ = os.RemoveAll(tmpDir)
os.Exit(exitCode)
}


@ -6,6 +6,7 @@ import (
"github.com/safing/portbase/modules"
)
// ModuleHandler specifies the interface for API endpoints that are bound to a module.
type ModuleHandler interface {
BelongsTo() *modules.Module
}


@ -5,6 +5,7 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/safing/portbase/log"
)
@ -25,6 +26,9 @@ type Request struct {
// AuthToken is the request-side authentication token assigned.
AuthToken *AuthToken
// ResponseHeader holds the response header.
ResponseHeader http.Header
// HandlerCache can be used by handlers to cache data between handlers within a request.
HandlerCache interface{}
}
@ -32,13 +36,12 @@ type Request struct {
// apiRequestContextKey is a key used for the context key/value storage.
type apiRequestContextKey struct{}
var (
requestContextKey = apiRequestContextKey{}
)
// RequestContextKey is the key used to add the API request to the context.
var RequestContextKey = apiRequestContextKey{}
// GetAPIRequest returns the API Request of the given http request.
func GetAPIRequest(r *http.Request) *Request {
ar, ok := r.Context().Value(requestContextKey).(*Request)
ar, ok := r.Context().Value(RequestContextKey).(*Request)
if ok {
return ar
}


@ -3,8 +3,11 @@ package api
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"path"
"runtime/debug"
"strings"
"sync"
"time"
@ -12,40 +15,74 @@ import (
"github.com/gorilla/mux"
"github.com/safing/portbase/log"
"github.com/safing/portbase/utils"
)
// EnableServer defines if the HTTP server should be started.
var EnableServer = true
var (
// gorilla mux
// mainMux is the main mux router.
mainMux = mux.NewRouter()
// main server and lock
server = &http.Server{}
// server is the main server.
server = &http.Server{
ReadHeaderTimeout: 10 * time.Second,
}
handlerLock sync.RWMutex
allowedDevCORSOrigins = []string{
"127.0.0.1",
"localhost",
}
)
// RegisterHandler registers a handler with the API endoint.
// RegisterHandler registers a handler with the API endpoint.
func RegisterHandler(path string, handler http.Handler) *mux.Route {
handlerLock.Lock()
defer handlerLock.Unlock()
return mainMux.Handle(path, handler)
}
// RegisterHandleFunc registers a handle function with the API endoint.
// RegisterHandleFunc registers a handle function with the API endpoint.
func RegisterHandleFunc(path string, handleFunc func(http.ResponseWriter, *http.Request)) *mux.Route {
handlerLock.Lock()
defer handlerLock.Unlock()
return mainMux.HandleFunc(path, handleFunc)
}
// Serve starts serving the API endpoint.
func Serve() {
// configure server
func startServer() {
// Check if server is enabled.
if !EnableServer {
return
}
// Configure server.
server.Addr = listenAddressConfig()
server.Handler = &mainHandler{
// TODO: mainMux should not be modified anymore.
mux: mainMux,
}
// Start server manager.
module.StartServiceWorker("http server manager", 0, serverManager)
}
func stopServer() error {
// Check if server is enabled.
if !EnableServer {
return nil
}
if server.Addr != "" {
return server.Shutdown(context.Background())
}
return nil
}
// serverManager runs the API HTTP server and restarts it with a backoff if it fails.
func serverManager(_ context.Context) error {
// start serving
log.Infof("api: starting to listen on %s", server.Addr)
backoffDuration := 10 * time.Second
@ -56,7 +93,7 @@ func Serve() {
})
// return on shutdown error
if errors.Is(err, http.ErrServerClosed) {
return
return nil
}
// log error and restart
log.Errorf("api: http endpoint failed: %s - restarting in %s", err, backoffDuration)
@ -81,7 +118,7 @@ func (mh *mainHandler) handle(w http.ResponseWriter, r *http.Request) error {
apiRequest := &Request{
Request: r,
}
ctx = context.WithValue(ctx, requestContextKey, apiRequest)
ctx = context.WithValue(ctx, RequestContextKey, apiRequest)
// Add context back to request.
r = r.WithContext(ctx)
lrw := NewLoggingResponseWriter(w, r)
@ -96,6 +133,80 @@ func (mh *mainHandler) handle(w http.ResponseWriter, r *http.Request) error {
tracer.Submit()
}()
// Add security headers.
w.Header().Set("Referrer-Policy", "same-origin")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("X-Frame-Options", "deny")
w.Header().Set("X-XSS-Protection", "1; mode=block")
w.Header().Set("X-DNS-Prefetch-Control", "off")
// Add CSP Header in production mode.
if !devMode() {
w.Header().Set(
"Content-Security-Policy",
"default-src 'self'; "+
"connect-src https://*.safing.io 'self'; "+
"style-src 'self' 'unsafe-inline'; "+
"img-src 'self' data: blob:",
)
}
// Check Cross-Origin Requests.
origin := r.Header.Get("Origin")
isPreflighCheck := false
if origin != "" {
// Parse origin URL.
originURL, err := url.Parse(origin)
if err != nil {
tracer.Warningf("api: denied request from %s: failed to parse origin header: %s", r.RemoteAddr, err)
http.Error(lrw, "Invalid Origin.", http.StatusForbidden)
return nil
}
// Check if the Origin matches the Host.
switch {
case originURL.Host == r.Host:
// Origin (with port) matches Host.
case originURL.Hostname() == r.Host:
// Origin (without port) matches Host.
case originURL.Scheme == "chrome-extension":
// Allow access for the browser extension
// TODO(ppacher):
// This currently allows access from any browser extension.
// Can we reduce that to only our browser extension?
// Also, what do we need to support Firefox?
case devMode() &&
utils.StringInSlice(allowedDevCORSOrigins, originURL.Hostname()):
// We are in dev mode and the request is coming from the allowed
// development origins.
default:
// Origin and Host do NOT match!
tracer.Warningf("api: denied request from %s: Origin (`%s`) and Host (`%s`) do not match", r.RemoteAddr, origin, r.Host)
http.Error(lrw, "Cross-Origin Request Denied.", http.StatusForbidden)
return nil
// If the Host header has a port, and the Origin does not, requests will
// also end up here, as we cannot properly check for equality.
}
// Add Cross-Site Headers now as we need them in any case now.
w.Header().Set("Access-Control-Allow-Origin", origin)
w.Header().Set("Access-Control-Allow-Methods", "*")
w.Header().Set("Access-Control-Allow-Headers", "*")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Expose-Headers", "*")
w.Header().Set("Access-Control-Max-Age", "60")
w.Header().Add("Vary", "Origin")
// If there is an Access-Control-Request-Method header, this is a preflight check.
// In that case, we just check whether the preflight method is allowed and then
// return success here.
if preflighMethod := r.Header.Get("Access-Control-Request-Method"); r.Method == http.MethodOptions && preflighMethod != "" {
isPreflighCheck = true
}
}
// Clean URL.
cleanedRequestPath := cleanRequestPath(r.URL.Path)
@ -117,14 +228,41 @@ func (mh *mainHandler) handle(w http.ResponseWriter, r *http.Request) error {
apiRequest.Route = match.Route
apiRequest.URLVars = match.Vars
}
switch {
case match.MatchErr == nil:
// All good.
case errors.Is(match.MatchErr, mux.ErrMethodMismatch):
http.Error(lrw, "Method not allowed.", http.StatusMethodNotAllowed)
return nil
default:
tracer.Debug("api: no handler registered for this path")
http.Error(lrw, "Not found.", http.StatusNotFound)
return nil
}
// Be sure that URLVars always is a map.
if apiRequest.URLVars == nil {
apiRequest.URLVars = make(map[string]string)
}
// Check method.
_, readMethod, ok := getEffectiveMethod(r)
if !ok {
http.Error(lrw, "Method not allowed.", http.StatusMethodNotAllowed)
return nil
}
// At this point we know the method is allowed and there's a handler for the request.
// If this is just a CORS-Preflight, we'll accept the request with StatusOK now.
// There's no point in trying to authenticate the request because the Browser will
// not send authentication along a preflight check.
if isPreflighCheck && handler != nil {
lrw.WriteHeader(http.StatusOK)
return nil
}
// Check authentication.
apiRequest.AuthToken = authenticateRequest(lrw, r, handler)
apiRequest.AuthToken = authenticateRequest(lrw, r, handler, readMethod)
if apiRequest.AuthToken == nil {
// Authenticator already replied.
return nil
@ -133,38 +271,42 @@ func (mh *mainHandler) handle(w http.ResponseWriter, r *http.Request) error {
// Wait for the owning module to be ready.
if moduleHandler, ok := handler.(ModuleHandler); ok {
if !moduleIsReady(moduleHandler.BelongsTo()) {
http.Error(lrw, "The API endpoint is not ready yet. Please try again later.", http.StatusServiceUnavailable)
http.Error(lrw, "The API endpoint is not ready yet. Reload (F5) to try again.", http.StatusServiceUnavailable)
return nil
}
}
// Add security headers.
if !devMode() {
w.Header().Set(
"Content-Security-Policy",
"default-src 'self'; "+
"connect-src https://*.safing.io 'self'; "+
"style-src 'self' 'unsafe-inline'; "+
"img-src 'self' data:",
)
w.Header().Set("Referrer-Policy", "no-referrer")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("X-Frame-Options", "deny")
w.Header().Set("X-XSS-Protection", "1; mode=block")
w.Header().Set("X-DNS-Prefetch-Control", "off")
} else {
w.Header().Set("Access-Control-Allow-Origin", "*")
// Check if we have a handler.
if handler == nil {
http.Error(lrw, "Not found.", http.StatusNotFound)
return nil
}
// Handle request.
switch {
case handler != nil:
handler.ServeHTTP(lrw, r)
case errors.Is(match.MatchErr, mux.ErrMethodMismatch):
http.Error(lrw, "Method not allowed.", http.StatusMethodNotAllowed)
default: // handler == nil or other error
http.Error(lrw, "Not found.", http.StatusNotFound)
}
// Format panics in handler.
defer func() {
if panicValue := recover(); panicValue != nil {
// Report failure via module system.
me := module.NewPanicError("api request", "custom", panicValue)
me.Report()
// Respond with a server error.
if devMode() {
http.Error(
lrw,
fmt.Sprintf(
"Internal Server Error: %s\n\n%s",
panicValue,
debug.Stack(),
),
http.StatusInternalServerError,
)
} else {
http.Error(lrw, "Internal Server Error.", http.StatusInternalServerError)
}
}
}()
// Handle with registered handler.
handler.ServeHTTP(lrw, r)
return nil
}

apprise/notify.go Normal file

@ -0,0 +1,167 @@
package apprise
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"sync"
"github.com/safing/portbase/utils"
)
// Notifier sends messages to an Apprise API.
type Notifier struct {
// URL defines the Apprise API endpoint.
URL string
// DefaultType defines the default message type.
DefaultType MsgType
// DefaultTag defines the default message tag.
DefaultTag string
// DefaultFormat defines the default message format.
DefaultFormat MsgFormat
// AllowUntagged defines if untagged messages are allowed,
// which are sent to all configured apprise endpoints.
AllowUntagged bool
client *http.Client
clientLock sync.Mutex
}
// Message represents the message to be sent to the Apprise API.
type Message struct {
// Title is an optional title to go along with the body.
Title string `json:"title,omitempty"`
// Body is the main message content. This is the only required field.
Body string `json:"body"`
// Type defines the message type you want to send as.
// The valid options are info, success, warning, and failure.
// If no type is specified then info is the default value used.
Type MsgType `json:"type,omitempty"`
// Tag is used to notify only those tagged accordingly.
// Use a comma (,) to OR your tags and a space ( ) to AND them.
Tag string `json:"tag,omitempty"`
// Format optionally identifies the text format of the data you're feeding Apprise.
// The valid options are text, markdown, html.
// The default value if nothing is specified is text.
Format MsgFormat `json:"format,omitempty"`
}
// MsgType defines the message type.
type MsgType string
// Message Types.
const (
TypeInfo MsgType = "info"
TypeSuccess MsgType = "success"
TypeWarning MsgType = "warning"
TypeFailure MsgType = "failure"
)
// MsgFormat defines the message format.
type MsgFormat string
// Message Formats.
const (
FormatText MsgFormat = "text"
FormatMarkdown MsgFormat = "markdown"
FormatHTML MsgFormat = "html"
)
type errorResponse struct {
Error string `json:"error"`
}
// Send sends a message to the Apprise API.
func (n *Notifier) Send(ctx context.Context, m *Message) error {
// Check if the message has a body.
if m.Body == "" {
return errors.New("the message must have a body")
}
// Apply notifier defaults.
n.applyDefaults(m)
// Check if the message is tagged.
if m.Tag == "" && !n.AllowUntagged {
return errors.New("the message must have a tag")
}
// Marshal the message to JSON.
payload, err := json.Marshal(m)
if err != nil {
return fmt.Errorf("failed to marshal message: %w", err)
}
// Create request.
request, err := http.NewRequestWithContext(ctx, http.MethodPost, n.URL, bytes.NewReader(payload))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
request.Header.Set("Content-Type", "application/json")
// Send message to API.
resp, err := n.getClient().Do(request)
if err != nil {
return fmt.Errorf("failed to send message: %w", err)
}
defer resp.Body.Close() //nolint:errcheck,gosec
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted:
return nil
default:
// Try to tease body contents.
if body, err := io.ReadAll(resp.Body); err == nil && len(body) > 0 {
// Try to parse json response.
errorResponse := &errorResponse{}
if err := json.Unmarshal(body, errorResponse); err == nil && errorResponse.Error != "" {
return fmt.Errorf("failed to send message: apprise returned %q with an error message: %s", resp.Status, errorResponse.Error)
}
return fmt.Errorf("failed to send message: %s (body teaser: %s)", resp.Status, utils.SafeFirst16Bytes(body))
}
return fmt.Errorf("failed to send message: %s", resp.Status)
}
}
func (n *Notifier) applyDefaults(m *Message) {
if m.Type == "" {
m.Type = n.DefaultType
}
if m.Tag == "" {
m.Tag = n.DefaultTag
}
if m.Format == "" {
m.Format = n.DefaultFormat
}
}
// SetClient sets a custom http client for accessing the Apprise API.
func (n *Notifier) SetClient(client *http.Client) {
n.clientLock.Lock()
defer n.clientLock.Unlock()
n.client = client
}
func (n *Notifier) getClient() *http.Client {
n.clientLock.Lock()
defer n.clientLock.Unlock()
// Create client if needed.
if n.client == nil {
n.client = &http.Client{}
}
return n.client
}
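A minimal usage sketch (not part of the new file itself): sending a tagged markdown notification to a local Apprise instance; the URL and tag are illustrative assumptions.

func exampleNotify(ctx context.Context) error {
    n := &Notifier{
        URL:           "http://127.0.0.1:8000/notify/apprise",
        DefaultType:   TypeInfo,
        DefaultFormat: FormatMarkdown,
        DefaultTag:    "ops",
    }
    return n.Send(ctx, &Message{
        Title: "Portmaster",
        Body:  "Background update **completed**.",
    })
}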


@ -80,7 +80,7 @@ func registerBasicOptions() error {
// Register to hook to update the log level.
if err := module.RegisterEventHook(
"config",
configChangeEvent,
ChangeEvent,
"update log level",
setLogLevel,
); err != nil {
@ -102,8 +102,8 @@ func registerBasicOptions() error {
})
}
func loadLogLevel() {
setDefaultConfigOption(CfgLogLevel, log.GetLogLevel().Name(), false)
func loadLogLevel() error {
return setDefaultConfigOption(CfgLogLevel, log.GetLogLevel().Name(), false)
}
func setLogLevel(ctx context.Context, data interface{}) error {


@ -13,9 +13,7 @@ import (
"github.com/safing/portbase/log"
)
var (
dbController *database.Controller
)
var dbController *database.Controller
// StorageInterface provides a storage.Interface to the configuration manager.
type StorageInterface struct {
@ -67,6 +65,8 @@ func (s *StorageInterface) Put(r record.Record) (record.Record, error) {
value, ok = acc.GetInt("Value")
case OptTypeBool:
value, ok = acc.GetBool("Value")
case optTypeAny:
ok = false
}
if !ok {
return nil, errors.New("received invalid value in \"Value\"")


@ -1,5 +1,3 @@
// Package config ... (linter fix)
//nolint:dupl
package config
import (
@ -53,17 +51,17 @@ func registerExpertiseLevelOption() {
},
PossibleValues: []PossibleValue{
{
Name: "Simple",
Name: "Simple Interface",
Value: ExpertiseLevelNameUser,
Description: "Hide complex settings and information.",
},
{
Name: "Advanced",
Name: "Advanced Interface",
Value: ExpertiseLevelNameExpert,
Description: "Show technical details.",
},
{
Name: "Developer",
Name: "Developer Interface",
Value: ExpertiseLevelNameDeveloper,
Description: "Developer mode. Please be careful!",
},


@ -4,10 +4,8 @@ import "sync"
type safe struct{}
var (
// Concurrent makes concurrency safe get methods available.
Concurrent = &safe{}
)
// Concurrent makes concurrency safe get methods available.
var Concurrent = &safe{}
// GetAsString returns a function that returns the wanted string with high performance.
func (cs *safe) GetAsString(name string, fallback string) StringOption {


@ -2,6 +2,7 @@ package config
import (
"encoding/json"
"fmt"
"testing"
"github.com/safing/portbase/log"
@ -13,7 +14,11 @@ func parseAndReplaceConfig(jsonData string) error {
return err
}
return replaceConfig(m)
validationErrors, _ := ReplaceConfig(m)
if len(validationErrors) > 0 {
return fmt.Errorf("%d errors, first: %w", len(validationErrors), validationErrors[0])
}
return nil
}
func parseAndReplaceDefaultConfig(jsonData string) error {
@ -22,10 +27,16 @@ func parseAndReplaceDefaultConfig(jsonData string) error {
return err
}
return replaceDefaultConfig(m)
validationErrors, _ := ReplaceDefaultConfig(m)
if len(validationErrors) > 0 {
return fmt.Errorf("%d errors, first: %w", len(validationErrors), validationErrors[0])
}
return nil
}
func quickRegister(t *testing.T, key string, optType OptionType, defaultValue interface{}) {
t.Helper()
err := Register(&Option{
Name: key,
Key: key,
@ -40,7 +51,7 @@ func quickRegister(t *testing.T, key string, optType OptionType, defaultValue in
}
}
func TestGet(t *testing.T) { //nolint:gocognit
func TestGet(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
@ -181,7 +192,7 @@ func TestGet(t *testing.T) { //nolint:gocognit
}
}
func TestReleaseLevel(t *testing.T) {
func TestReleaseLevel(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
registerReleaseLevelOption()


@ -4,17 +4,20 @@ import (
"encoding/json"
"errors"
"flag"
"fmt"
"io/fs"
"os"
"path/filepath"
"sort"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/utils"
"github.com/safing/portbase/utils/debug"
)
const (
configChangeEvent = "config change"
)
// ChangeEvent is the name of the config change event.
const ChangeEvent = "config change"
var (
module *modules.Module
@ -32,7 +35,7 @@ func SetDataRoot(root *utils.DirStructure) {
func init() {
module = modules.Register("config", prep, start, nil, "database")
module.RegisterEvent(configChangeEvent, true)
module.RegisterEvent(ChangeEvent, true)
flag.BoolVar(&exportConfig, "export-config-options", false, "export configuration registry and exit")
}
@ -54,21 +57,32 @@ func start() error {
configFilePath = filepath.Join(dataRoot.Path, "config.json")
// Load log level from log package after it started.
loadLogLevel()
err := registerAsDatabase()
if err != nil && !os.IsNotExist(err) {
err := loadLogLevel()
if err != nil {
return err
}
err = loadConfig()
if err != nil && !os.IsNotExist(err) {
err = registerAsDatabase()
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
err = loadConfig(false)
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return fmt.Errorf("failed to load config file: %w", err)
}
return nil
}
func exportConfigCmd() error {
// Reset the metrics instance name option, as the default
// is set to the current hostname.
// Config key copied from metrics.CfgOptionInstanceKey.
option, err := GetOption("core/metrics/instance")
if err == nil {
option.DefaultValue = ""
}
data, err := json.MarshalIndent(ExportOptions(), "", " ")
if err != nil {
return err
@ -77,3 +91,51 @@ func exportConfigCmd() error {
_, err = os.Stdout.Write(data)
return err
}
// AddToDebugInfo adds all changed global config options to the given debug.Info.
func AddToDebugInfo(di *debug.Info) {
var lines []string
// Collect all changed settings.
_ = ForEachOption(func(opt *Option) error {
opt.Lock()
defer opt.Unlock()
if opt.ReleaseLevel <= getReleaseLevel() && opt.activeValue != nil {
if opt.Sensitive {
lines = append(lines, fmt.Sprintf("%s: [redacted]", opt.Key))
} else {
lines = append(lines, fmt.Sprintf("%s: %v", opt.Key, opt.activeValue.getData(opt)))
}
}
return nil
})
sort.Strings(lines)
// Add data as section.
di.AddSection(
fmt.Sprintf("Config: %d", len(lines)),
debug.UseCodeSection|debug.AddContentLineBreaks,
lines...,
)
}
// GetActiveConfigValues returns a map with the active config values.
func GetActiveConfigValues() map[string]interface{} {
values := make(map[string]interface{})
// Collect active values from options.
_ = ForEachOption(func(opt *Option) error {
opt.Lock()
defer opt.Unlock()
if opt.ReleaseLevel <= getReleaseLevel() && opt.activeValue != nil {
values[opt.Key] = opt.activeValue.getData(opt)
}
return nil
})
return values
}
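A short sketch (assumption) of the second helper; note that, unlike AddToDebugInfo, GetActiveConfigValues does not redact options marked Sensitive, so callers must handle those values accordingly.

func printActiveConfig() {
    for key, value := range GetActiveConfigValues() {
        fmt.Printf("config: %s = %v\n", key, value)
    }
}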


@ -3,12 +3,15 @@ package config
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"sync"
"github.com/mitchellh/copystructure"
"github.com/tidwall/sjson"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
)
// OptionType defines the value type of an option.
@ -63,6 +66,9 @@ type PossibleValue struct {
// Format: <vendor/package>:<scope>:<identifier> //.
type Annotations map[string]interface{}
// MigrationFunc is a function that migrates a config option value.
type MigrationFunc func(option *Option, value any) any
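As a hedged example (not from this changeset) of the new migration hook: an option that rewrites a legacy numeric value into its new string form before validation runs. The key and mapping are illustrative; values loaded from JSON arrive as float64.

func registerMigratedOption() error {
    return Register(&Option{
        Name:         "Example Log Level",
        Key:          "example/logLevel",
        Description:  "Example option with a value migration.",
        OptType:      OptTypeString,
        DefaultValue: "info",
        Migrations: []MigrationFunc{
            func(option *Option, value any) any {
                // Older releases stored the level as a number.
                if old, ok := value.(float64); ok && old == 0 {
                    return "info"
                }
                return value
            },
        },
    })
}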
// Well known annotations defined by this package.
const (
// DisplayHintAnnotation provides a hint for the user
@ -93,6 +99,10 @@ const (
// may be extended to hold references to other options in the
// future.
StackableAnnotation = "safing/portbase:options:stackable"
// RestartPendingAnnotation is automatically set on a configuration option
// that requires a restart and has been changed.
// The value must always be a boolean with value "true".
RestartPendingAnnotation = "safing/portbase:options:restart-pending"
// QuickSettingAnnotation can be used to add quick settings to
// a configuration option. A quick setting can support the user
// by switching between pre-configured values.
@ -102,6 +112,19 @@ const (
// requirement. The type of RequiresAnnotation is []ValueRequirement
// or ValueRequirement.
RequiresAnnotation = "safing/portbase:config:requires"
// RequiresFeatureIDAnnotation can be used to mark a setting as only available
// when the user has a certain feature ID in the subscription plan.
// The type is []string or string.
RequiresFeatureIDAnnotation = "safing/portmaster:ui:config:requires-feature"
// SettablePerAppAnnotation can be used to mark a setting as settable per-app and
// is a boolean.
SettablePerAppAnnotation = "safing/portmaster:settable-per-app"
// RequiresUIReloadAnnotation can be used to inform the UI that changing the value
// of the annotated setting requires a full reload of the user interface.
// The value of this annotation does not matter as the sole presence of
// the annotation key is enough. Though, users are advised to set the value
// of this annotation to true.
RequiresUIReloadAnnotation = "safing/portmaster:ui:requires-reload"
)
// QuickSettingsAction defines the action of a quick setting.
@ -152,11 +175,14 @@ const (
// only sense together with the PossibleValues property
// of Option.
DisplayHintOneOf = "one-of"
// DisplayHintOrdered Used to mark a list option as ordered.
// DisplayHintOrdered is used to mark a list option as ordered.
// That is, the order of items is important and a user interface
// is encouraged to provide the user with re-ordering support
// (like drag'n'drop).
DisplayHintOrdered = "ordered"
// DisplayHintFilePicker is used to mark the option as being a file, which
// should give the option to use a file picker to select a local file from disk.
DisplayHintFilePicker = "file-picker"
)
// Option describes a configuration option.
@ -184,6 +210,9 @@ type Option struct {
// Help is considered immutable after the option has
// been created.
Help string
// Sensitive signifies that the configuration values may contain sensitive
// content, such as authentication keys.
Sensitive bool
// OptType defines the type of the option.
// OptType is considered immutable after the option has
// been created.
@ -218,6 +247,10 @@ type Option struct {
// ValidationRegex is considered immutable after the option has
// been created.
ValidationRegex string
// ValidationFunc may contain a function to validate more complex values.
// The error is returned beyond the scope of this package and may be
// displayed to a user.
ValidationFunc func(value interface{}) error `json:"-"`
// PossibleValues may be set to a slice of values that are allowed
// for this configuration setting. Note that PossibleValues makes most
// sense when ExternalOptType is set to HintOneOf
@ -229,6 +262,9 @@ type Option struct {
// Annotations is considered mutable and setting/reading annotation keys
// must be performed while the option is locked.
Annotations Annotations
// Migrations holds migration functions that are given the raw option value
// before any validation is run. The returned value is then used.
Migrations []MigrationFunc `json:"-"`
activeValue *valueCache // runtime value (loaded from config file or set by user)
activeDefaultValue *valueCache // runtime default value (may be set internally)
@ -257,6 +293,12 @@ func (option *Option) SetAnnotation(key string, value interface{}) {
option.Lock()
defer option.Unlock()
option.setAnnotation(key, value)
}
// setAnnotation sets the value of the annotation key, overwriting an
// existing value if required. Does not lock the Option.
func (option *Option) setAnnotation(key string, value interface{}) {
if option.Annotations == nil {
option.Annotations = make(Annotations)
}
@ -275,6 +317,63 @@ func (option *Option) GetAnnotation(key string) (interface{}, bool) {
return val, ok
}
// AnnotationEquals returns whether the annotation of the given key matches the
// given value.
func (option *Option) AnnotationEquals(key string, value any) bool {
option.Lock()
defer option.Unlock()
if option.Annotations == nil {
return false
}
setValue, ok := option.Annotations[key]
if !ok {
return false
}
return reflect.DeepEqual(value, setValue)
}
// copyOrNil returns a copy of the option, or nil if copying failed.
func (option *Option) copyOrNil() *Option {
copied, err := copystructure.Copy(option)
if err != nil {
return nil
}
return copied.(*Option) //nolint:forcetypeassert
}
// IsSetByUser returns whether the option has been set by the user.
func (option *Option) IsSetByUser() bool {
option.Lock()
defer option.Unlock()
return option.activeValue != nil
}
// UserValue returns the value set by the user or nil if the value has not
// been changed from the default.
func (option *Option) UserValue() any {
option.Lock()
defer option.Unlock()
if option.activeValue == nil {
return nil
}
return option.activeValue.getData(option)
}
// ValidateValue checks if the given value is valid for the option.
func (option *Option) ValidateValue(value any) error {
option.Lock()
defer option.Unlock()
value = migrateValue(option, value)
if _, err := validateValue(option, value); err != nil {
return err
}
return nil
}
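A brief sketch (assumption) of the new inspection helpers, e.g. for building a settings UI; the key and candidate value are illustrative.

func inspectOption() error {
    opt, err := GetOption("example/logLevel")
    if err != nil {
        return err
    }
    if opt.IsSetByUser() {
        fmt.Printf("%s is set to %v\n", opt.Key, opt.UserValue())
    }
    // Check a candidate value (after migrations) before applying it.
    return opt.ValidateValue("debug")
}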
// Export exports an option to a Record.
func (option *Option) Export() (record.Record, error) {
option.Lock()
@ -303,7 +402,7 @@ func (option *Option) export() (record.Record, error) {
}
}
r, err := record.NewWrapper(fmt.Sprintf("config:%s", option.Key), nil, record.JSON, data)
r, err := record.NewWrapper(fmt.Sprintf("config:%s", option.Key), nil, dsd.JSON, data)
if err != nil {
return nil, err
}


@ -2,25 +2,39 @@ package config
import (
"encoding/json"
"io/ioutil"
"fmt"
"os"
"path"
"strings"
"sync"
"github.com/safing/portbase/log"
)
var (
configFilePath string
loadedConfigValidationErrors []*ValidationError
loadedConfigValidationErrorsLock sync.Mutex
)
func loadConfig() error {
// GetLoadedConfigValidationErrors returns the encountered validation errors
// from the last time loading config from disk.
func GetLoadedConfigValidationErrors() []*ValidationError {
loadedConfigValidationErrorsLock.Lock()
defer loadedConfigValidationErrorsLock.Unlock()
return loadedConfigValidationErrors
}
func loadConfig(requireValidConfig bool) error {
// check if persistence is configured
if configFilePath == "" {
return nil
}
// read config file
data, err := ioutil.ReadFile(configFilePath)
data, err := os.ReadFile(configFilePath)
if err != nil {
return err
}
@ -31,13 +45,23 @@ func loadConfig() error {
return err
}
return replaceConfig(newValues)
validationErrors, _ := ReplaceConfig(newValues)
if requireValidConfig && len(validationErrors) > 0 {
return fmt.Errorf("encountered %d validation errors during config loading", len(validationErrors))
}
// Save validation errors.
loadedConfigValidationErrorsLock.Lock()
defer loadedConfigValidationErrorsLock.Unlock()
loadedConfigValidationErrors = validationErrors
return nil
}
// saveConfig saves the current configuration to file.
// SaveConfig saves the current configuration to file.
// It will acquire a read-lock on the global options registry
// lock and must lock each option!
func saveConfig() error {
func SaveConfig() error {
optionsLock.RLock()
defer optionsLock.RUnlock()
@ -69,7 +93,7 @@ func saveConfig() error {
}
// write file
return ioutil.WriteFile(configFilePath, data, 0600)
return os.WriteFile(configFilePath, data, 0o0600)
}
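A hedged usage sketch (not part of this diff): surfacing the validation errors collected while loading the config file and then persisting the current state with the newly exported SaveConfig.

func reportAndSave() error {
    for _, vErr := range GetLoadedConfigValidationErrors() {
        log.Warningf("config: stored value is invalid: %s", vErr)
    }
    return SaveConfig()
}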
// JSONToMap parses and flattens a hierarchical json object.

View file

@ -36,6 +36,7 @@ var (
)
func TestJSONMapConversion(t *testing.T) {
t.Parallel()
// convert to json
j, err := MapToJSON(mapData)
@ -67,6 +68,8 @@ func TestJSONMapConversion(t *testing.T) {
}
func TestConfigCleaning(t *testing.T) {
t.Parallel()
// load
configFlat, err := JSONToMap(jsonBytes)
if err != nil {


@ -35,6 +35,8 @@ optionsLoop:
if !ok {
continue
}
// migrate value
configValue = migrateValue(option, configValue)
// validate value
valueCache, err := validateValue(option, configValue)
if err != nil {
@ -55,7 +57,7 @@ optionsLoop:
if firstErr != nil {
if errCnt > 0 {
return perspective, fmt.Errorf("encountered %d errors, first was: %s", errCnt, firstErr)
return perspective, fmt.Errorf("encountered %d errors, first was: %w", errCnt, firstErr)
}
return perspective, firstErr
}


@ -88,13 +88,14 @@ func Register(option *Option) error {
if option.ValidationRegex != "" {
option.compiledRegex, err = regexp.Compile(option.ValidationRegex)
if err != nil {
return fmt.Errorf("config: could not compile option.ValidationRegex: %s", err)
return fmt.Errorf("config: could not compile option.ValidationRegex: %w", err)
}
}
option.activeFallbackValue, err = validateValue(option, option.DefaultValue)
if err != nil {
return fmt.Errorf("config: invalid default value: %s", err)
var vErr *ValidationError
option.activeFallbackValue, vErr = validateValue(option, option.DefaultValue)
if vErr != nil {
return fmt.Errorf("config: invalid default value: %w", vErr)
}
optionsLock.Lock()

View file

@ -4,7 +4,7 @@ import (
"testing"
)
func TestRegistry(t *testing.T) {
func TestRegistry(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
@ -46,5 +46,4 @@ func TestRegistry(t *testing.T) {
}); err == nil {
t.Error("should fail")
}
}

View file

@ -1,5 +1,3 @@
// Package config ... (linter fix)
//nolint:dupl
package config
import (
@ -12,7 +10,7 @@ import (
// configuration setting.
type ReleaseLevel uint8
// Release Level constants
// Release Level constants.
const (
ReleaseLevelStable ReleaseLevel = 0
ReleaseLevelBeta ReleaseLevel = 1
@ -41,7 +39,7 @@ func registerReleaseLevelOption() {
Key: releaseLevelKey,
Description: `May break things. Decide if you want to experiment with unstable features. "Beta" has been tested roughly by the Safing team while "Experimental" is really raw. When "Beta" or "Experimental" are disabled, their settings use the default again.`,
OptType: OptTypeString,
ExpertiseLevel: ExpertiseLevelExpert,
ExpertiseLevel: ExpertiseLevelDeveloper,
ReleaseLevel: ReleaseLevelStable,
DefaultValue: ReleaseLevelNameStable,
Annotations: Annotations{

View file

@ -2,7 +2,6 @@ package config
import (
"errors"
"fmt"
"sync"
"github.com/tevino/abool"
@ -35,102 +34,126 @@ func signalChanges() {
validityFlag = abool.NewBool(true)
validityFlagLock.Unlock()
module.TriggerEvent(configChangeEvent, nil)
module.TriggerEvent(ChangeEvent, nil)
}
// replaceConfig sets the (prioritized) user defined config.
func replaceConfig(newValues map[string]interface{}) error {
var firstErr error
var errCnt int
// ValidateConfig validates the given configuration and returns all validation
// errors as well as whether the given configuration contains unknown keys.
func ValidateConfig(newValues map[string]interface{}) (validationErrors []*ValidationError, requiresRestart bool, containsUnknown bool) {
// RLock the options because we are not adding or removing
// options from the registry but rather only checking the
// option values, which are guarded by each option's own lock.
optionsLock.RLock()
defer optionsLock.RUnlock()
var checked int
for key, option := range options {
newValue, ok := newValues[key]
if ok {
checked++
func() {
option.Lock()
defer option.Unlock()
newValue = migrateValue(option, newValue)
_, err := validateValue(option, newValue)
if err != nil {
validationErrors = append(validationErrors, err)
}
if option.RequiresRestart {
requiresRestart = true
}
}()
}
}
return validationErrors, requiresRestart, checked < len(newValues)
}
// ReplaceConfig sets the (prioritized) user defined config.
func ReplaceConfig(newValues map[string]interface{}) (validationErrors []*ValidationError, requiresRestart bool) {
// RLock the options because we are not adding or removing
// options from the registration but rather only update the
// options value which is guarded by the option's lock itself
// options value which is guarded by the option's lock itself.
optionsLock.RLock()
defer optionsLock.RUnlock()
for key, option := range options {
newValue, ok := newValues[key]
option.Lock()
option.activeValue = nil
if ok {
valueCache, err := validateValue(option, newValue)
if err == nil {
option.activeValue = valueCache
} else {
errCnt++
if firstErr == nil {
firstErr = err
func() {
option.Lock()
defer option.Unlock()
option.activeValue = nil
if ok {
newValue = migrateValue(option, newValue)
valueCache, err := validateValue(option, newValue)
if err == nil {
option.activeValue = valueCache
} else {
validationErrors = append(validationErrors, err)
}
}
}
handleOptionUpdate(option, true)
handleOptionUpdate(option, true)
option.Unlock()
if option.RequiresRestart {
requiresRestart = true
}
}()
}
signalChanges()
if firstErr != nil {
if errCnt > 0 {
return fmt.Errorf("encountered %d errors, first was: %s", errCnt, firstErr)
}
return firstErr
}
return nil
return validationErrors, requiresRestart
}
// replaceDefaultConfig sets the (fallback) default config.
func replaceDefaultConfig(newValues map[string]interface{}) error {
var firstErr error
var errCnt int
// ReplaceDefaultConfig sets the (fallback) default config.
func ReplaceDefaultConfig(newValues map[string]interface{}) (validationErrors []*ValidationError, requiresRestart bool) {
// RLock the options because we are not adding or removing
// options from the registration but rather only update the
// options value which is guarded by the option's lock itself
// options value which is guarded by the option's lock itself.
optionsLock.RLock()
defer optionsLock.RUnlock()
for key, option := range options {
newValue, ok := newValues[key]
option.Lock()
option.activeDefaultValue = nil
if ok {
valueCache, err := validateValue(option, newValue)
if err == nil {
option.activeDefaultValue = valueCache
} else {
errCnt++
if firstErr == nil {
firstErr = err
func() {
option.Lock()
defer option.Unlock()
option.activeDefaultValue = nil
if ok {
newValue = migrateValue(option, newValue)
valueCache, err := validateValue(option, newValue)
if err == nil {
option.activeDefaultValue = valueCache
} else {
validationErrors = append(validationErrors, err)
}
}
}
handleOptionUpdate(option, true)
option.Unlock()
handleOptionUpdate(option, true)
if option.RequiresRestart {
requiresRestart = true
}
}()
}
signalChanges()
if firstErr != nil {
if errCnt > 0 {
return fmt.Errorf("encountered %d errors, first was: %s", errCnt, firstErr)
}
return firstErr
}
return nil
return validationErrors, requiresRestart
}
// SetConfigOption sets a single value in the (prioritized) user defined config.
func SetConfigOption(key string, value interface{}) error {
func SetConfigOption(key string, value any) error {
return setConfigOption(key, value, true)
}
func setConfigOption(key string, value interface{}, push bool) (err error) {
func setConfigOption(key string, value any, push bool) (err error) {
option, err := GetOption(key)
if err != nil {
return err
@ -140,13 +163,20 @@ func setConfigOption(key string, value interface{}, push bool) (err error) {
if value == nil {
option.activeValue = nil
} else {
var valueCache *valueCache
valueCache, err = validateValue(option, value)
if err == nil {
value = migrateValue(option, value)
valueCache, vErr := validateValue(option, value)
if vErr == nil {
option.activeValue = valueCache
} else {
err = vErr
}
}
// Add the "restart pending" annotation if the settings requires a restart.
if option.RequiresRestart {
option.setAnnotation(RestartPendingAnnotation, true)
}
handleOptionUpdate(option, push)
option.Unlock()
@ -157,7 +187,7 @@ func setConfigOption(key string, value interface{}, push bool) (err error) {
// finalize change, activate triggers
signalChanges()
return saveConfig()
return SaveConfig()
}
// SetDefaultConfigOption sets a single value in the (fallback) default config.
@ -175,13 +205,20 @@ func setDefaultConfigOption(key string, value interface{}, push bool) (err error
if value == nil {
option.activeDefaultValue = nil
} else {
var valueCache *valueCache
valueCache, err = validateValue(option, value)
if err == nil {
value = migrateValue(option, value)
valueCache, vErr := validateValue(option, value)
if vErr == nil {
option.activeDefaultValue = valueCache
} else {
err = vErr
}
}
// Add the "restart pending" annotation if the settings requires a restart.
if option.RequiresRestart {
option.setAnnotation(RestartPendingAnnotation, true)
}
handleOptionUpdate(option, push)
option.Unlock()

View file
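ReplaceConfig and ReplaceDefaultConfig now report typed validation errors plus a restart hint instead of a single error, and ValidateConfig allows a dry run first. A rough sketch of the new flow; the option key and value are placeholders:

	// imports assumed: "fmt", "github.com/safing/portbase/config"
	func applyNewConfig(newValues map[string]interface{}) error {
		// Dry run: validate without applying anything.
		vErrs, _, hasUnknown := config.ValidateConfig(newValues)
		if len(vErrs) > 0 || hasUnknown {
			return fmt.Errorf("rejecting config: %d invalid values (unknown keys: %v)", len(vErrs), hasUnknown)
		}
		// Apply for real; invalid individual values do not abort the whole replace.
		vErrs, requiresRestart := config.ReplaceConfig(newValues)
		if len(vErrs) > 0 {
			return fmt.Errorf("applied config with %d invalid values, first: %s", len(vErrs), vErrs[0])
		}
		if requiresRestart {
			// Notify the user that a restart is pending.
		}
		return nil
	}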

@ -1,9 +1,9 @@
//nolint:goconst,errcheck
//nolint:goconst
package config
import "testing"
func TestLayersGetters(t *testing.T) {
func TestLayersGetters(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
@ -24,9 +24,9 @@ func TestLayersGetters(t *testing.T) {
t.Fatal(err)
}
err = replaceConfig(mapData)
if err != nil {
t.Fatal(err)
validationErrors, _ := ReplaceConfig(mapData)
if len(validationErrors) > 0 {
t.Fatalf("%d errors, first: %s", len(validationErrors), validationErrors[0].Error())
}
// Test missing values
@ -77,14 +77,13 @@ func TestLayersGetters(t *testing.T) {
if notBool() {
t.Error("expected fallback value: false")
}
}
func TestLayersSetters(t *testing.T) {
func TestLayersSetters(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
Register(&Option{
_ = Register(&Option{
Name: "name",
Key: "monkey",
Description: "description",
@ -94,7 +93,7 @@ func TestLayersSetters(t *testing.T) {
DefaultValue: "banana",
ValidationRegex: "^(banana|water)$",
})
Register(&Option{
_ = Register(&Option{
Name: "name",
Key: "zebras/zebra",
Description: "description",
@ -104,7 +103,7 @@ func TestLayersSetters(t *testing.T) {
DefaultValue: []string{"black", "white"},
ValidationRegex: "^[a-z]+$",
})
Register(&Option{
_ = Register(&Option{
Name: "name",
Key: "elephant",
Description: "description",
@ -114,7 +113,7 @@ func TestLayersSetters(t *testing.T) {
DefaultValue: 2,
ValidationRegex: "",
})
Register(&Option{
_ = Register(&Option{
Name: "name",
Key: "hot",
Description: "description",
@ -191,5 +190,4 @@ func TestLayersSetters(t *testing.T) {
if err := SetDefaultConfigOption("invalid_delete", nil); err == nil {
t.Error("should fail")
}
}

View file

@ -5,6 +5,8 @@ import (
"fmt"
"math"
"reflect"
"github.com/safing/portbase/log"
)
type valueCache struct {
@ -24,6 +26,8 @@ func (vc *valueCache) getData(opt *Option) interface{} {
return vc.stringVal
case OptTypeStringArray:
return vc.stringArrayVal
case optTypeAny:
return nil
default:
return nil
}
@ -59,110 +63,177 @@ func isAllowedPossibleValue(opt *Option, value interface{}) error {
}
}
return fmt.Errorf("value is not allowed")
return errors.New("value is not allowed")
}
// migrateValue runs all value migrations.
func migrateValue(option *Option, value any) any {
for _, migration := range option.Migrations {
newValue := migration(option, value)
if newValue != value {
log.Debugf("config: migrated %s value from %v to %v", option.Key, value, newValue)
}
value = newValue
}
return value
}
// validateValue ensures that value matches the expected type of option.
// It does not create a copy of the value!
func validateValue(option *Option, value interface{}) (*valueCache, error) { //nolint:gocyclo
func validateValue(option *Option, value interface{}) (*valueCache, *ValidationError) { //nolint:gocyclo
if option.OptType != OptTypeStringArray {
if err := isAllowedPossibleValue(option, value); err != nil {
return nil, fmt.Errorf("validation of option %s failed for %v: %w", option.Key, value, err)
return nil, &ValidationError{
Option: option.copyOrNil(),
Err: err,
}
}
}
reflect.TypeOf(value).ConvertibleTo(reflect.TypeOf(""))
var validated *valueCache
switch v := value.(type) {
case string:
if option.OptType != OptTypeString {
return nil, fmt.Errorf("expected type %s for option %s, got type %T", getTypeName(option.OptType), option.Key, v)
return nil, invalid(option, "expected type %s, got type %T", getTypeName(option.OptType), v)
}
if option.compiledRegex != nil {
if !option.compiledRegex.MatchString(v) {
return nil, fmt.Errorf("validation of option %s failed: string \"%s\" did not match validation regex for option", option.Key, v)
return nil, invalid(option, "did not match validation regex")
}
}
return &valueCache{stringVal: v}, nil
validated = &valueCache{stringVal: v}
case []interface{}:
vConverted := make([]string, len(v))
for pos, entry := range v {
s, ok := entry.(string)
if !ok {
return nil, fmt.Errorf("validation of option %s failed: element %+v at index %d is not a string", option.Key, entry, pos)
return nil, invalid(option, "entry #%d is not a string", pos+1)
}
vConverted[pos] = s
}
// continue to next case
return validateValue(option, vConverted)
// Call validation function again with converted value.
var vErr *ValidationError
validated, vErr = validateValue(option, vConverted)
if vErr != nil {
return nil, vErr
}
case []string:
if option.OptType != OptTypeStringArray {
return nil, fmt.Errorf("expected type %s for option %s, got type %T", getTypeName(option.OptType), option.Key, v)
return nil, invalid(option, "expected type %s, got type %T", getTypeName(option.OptType), v)
}
if option.compiledRegex != nil {
for pos, entry := range v {
if !option.compiledRegex.MatchString(entry) {
return nil, fmt.Errorf("validation of option %s failed: string \"%s\" at index %d did not match validation regex", option.Key, entry, pos)
return nil, invalid(option, "entry #%d did not match validation regex", pos+1)
}
if err := isAllowedPossibleValue(option, entry); err != nil {
return nil, fmt.Errorf("validation of option %s failed: string %q at index %d is not allowed", option.Key, entry, pos)
return nil, invalid(option, "entry #%d is not allowed", pos+1)
}
}
}
return &valueCache{stringArrayVal: v}, nil
validated = &valueCache{stringArrayVal: v}
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64:
// uint64 is omitted, as it does not fit in a int64
if option.OptType != OptTypeInt {
return nil, fmt.Errorf("expected type %s for option %s, got type %T", getTypeName(option.OptType), option.Key, v)
return nil, invalid(option, "expected type %s, got type %T", getTypeName(option.OptType), v)
}
if option.compiledRegex != nil {
// we need to use %v here so we handle float and int correctly.
if !option.compiledRegex.MatchString(fmt.Sprintf("%v", v)) {
return nil, fmt.Errorf("validation of option %s failed: number \"%d\" did not match validation regex", option.Key, v)
return nil, invalid(option, "did not match validation regex")
}
}
switch v := value.(type) {
case int:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case int8:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case int16:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case int32:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case int64:
return &valueCache{intVal: v}, nil
validated = &valueCache{intVal: v}
case uint:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case uint8:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case uint16:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case uint32:
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
case float32:
// convert if float has no decimals
if math.Remainder(float64(v), 1) == 0 {
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
} else {
return nil, invalid(option, "failed to convert float32 to int64")
}
return nil, fmt.Errorf("failed to convert float32 to int64 for option %s, got value %+v", option.Key, v)
case float64:
// convert if float has no decimals
if math.Remainder(v, 1) == 0 {
return &valueCache{intVal: int64(v)}, nil
validated = &valueCache{intVal: int64(v)}
} else {
return nil, invalid(option, "failed to convert float64 to int64")
}
return nil, fmt.Errorf("failed to convert float64 to int64 for option %s, got value %+v", option.Key, v)
default:
return nil, errors.New("internal error")
return nil, invalid(option, "internal error")
}
case bool:
if option.OptType != OptTypeBool {
return nil, fmt.Errorf("expected type %s for option %s, got type %T", getTypeName(option.OptType), option.Key, v)
return nil, invalid(option, "expected type %s, got type %T", getTypeName(option.OptType), v)
}
return &valueCache{boolVal: v}, nil
validated = &valueCache{boolVal: v}
default:
return nil, fmt.Errorf("invalid option value type for option %s: %T", option.Key, value)
return nil, invalid(option, "invalid option value type: %T", value)
}
// Check if there is an additional function to validate the value.
if option.ValidationFunc != nil {
var err error
switch option.OptType {
case optTypeAny:
err = errors.New("internal error")
case OptTypeString:
err = option.ValidationFunc(validated.stringVal)
case OptTypeStringArray:
err = option.ValidationFunc(validated.stringArrayVal)
case OptTypeInt:
err = option.ValidationFunc(validated.intVal)
case OptTypeBool:
err = option.ValidationFunc(validated.boolVal)
}
if err != nil {
return nil, &ValidationError{
Option: option.copyOrNil(),
Err: err,
}
}
}
return validated, nil
}
// ValidationError error holds details about a config option value validation error.
type ValidationError struct {
Option *Option
Err error
}
// Error returns the formatted error.
func (ve *ValidationError) Error() string {
return fmt.Sprintf("validation of %s failed: %s", ve.Option.Key, ve.Err)
}
// Unwrap returns the wrapped error.
func (ve *ValidationError) Unwrap() error {
return ve.Err
}
func invalid(option *Option, format string, a ...interface{}) *ValidationError {
return &ValidationError{
Option: option.copyOrNil(),
Err: fmt.Errorf(format, a...),
}
}

View file
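Since validation failures are now typed as *ValidationError (with Error and Unwrap), callers can pull the failing option out of a wrapped error chain, for example the one Register returns for an invalid default value. A small sketch; the variable names are illustrative:

	// imports assumed: "errors", "github.com/safing/portbase/config", "github.com/safing/portbase/log"
	err := config.Register(opt) // wraps a *ValidationError if the default value is invalid
	var vErr *config.ValidationError
	if errors.As(err, &vErr) {
		log.Errorf("config: option %s rejected: %s", vErr.Option.Key, vErr.Err)
	}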

@ -10,9 +10,11 @@ type ValidityFlag struct {
}
// NewValidityFlag returns a flag that signifies if the configuration has been changed.
// It always starts out as invalid. Refresh to start with the current value.
func NewValidityFlag() *ValidityFlag {
vf := &ValidityFlag{}
vf.Refresh()
vf := &ValidityFlag{
flag: abool.New(),
}
return vf
}

View file

@ -7,7 +7,7 @@ import (
"github.com/safing/portbase/formats/varint"
)
// Container is []byte sclie on steroids, allowing for quick data appending, prepending and fetching as well as transparent error transportation. (Error transportation requires use of varints for data)
// Container is a []byte slice on steroids, allowing for quick data appending, prepending and fetching.
type Container struct {
compartments [][]byte
offset int
@ -127,7 +127,7 @@ func (c *Container) CompileData() []byte {
// Get returns the given amount of bytes. Data MAY be copied and IS consumed.
func (c *Container) Get(n int) ([]byte, error) {
buf := c.gather(n)
buf := c.Peek(n)
if len(buf) < n {
return nil, errors.New("container: not enough data to return")
}
@ -138,24 +138,24 @@ func (c *Container) Get(n int) ([]byte, error) {
// GetAll returns all data. Data MAY be copied and IS consumed.
func (c *Container) GetAll() []byte {
// TODO: Improve.
buf := c.gather(c.Length())
buf := c.Peek(c.Length())
c.skip(len(buf))
return buf
}
// GetAsContainer returns the given amount of bytes in a new container. Data will NOT be copied and IS consumed.
func (c *Container) GetAsContainer(n int) (*Container, error) {
new := c.gatherAsContainer(n)
if new == nil {
newC := c.PeekContainer(n)
if newC == nil {
return nil, errors.New("container: not enough data to return")
}
c.skip(n)
return new, nil
return newC, nil
}
// GetMax returns as much as possible, but the given amount of bytes at maximum. Data MAY be copied and IS consumed.
func (c *Container) GetMax(n int) []byte {
buf := c.gather(n)
buf := c.Peek(n)
c.skip(len(buf))
return buf
}
@ -211,17 +211,13 @@ func (c *Container) renewCompartments() {
}
func (c *Container) carbonCopy() *Container {
new := &Container{
newC := &Container{
compartments: make([][]byte, len(c.compartments)),
offset: c.offset,
err: c.err,
}
for i := 0; i < len(c.compartments); i++ {
new.compartments[i] = c.compartments[i]
}
// TODO: investigate why copy fails to correctly duplicate [][]byte
// copy(new.compartments, c.compartments)
return new
copy(newC.compartments, c.compartments)
return newC
}
func (c *Container) checkOffset() {
@ -230,42 +226,6 @@ func (c *Container) checkOffset() {
}
}
// Error Handling
/*
DEPRECATING... like.... NOW.
// SetError sets an error.
func (c *Container) SetError(err error) {
c.err = err
c.Replace(append([]byte{0x00}, []byte(err.Error())...))
}
// CheckError checks if there is an error in the data. If so, it will parse the error and delete the data.
func (c *Container) CheckError() {
if len(c.compartments[c.offset]) > 0 && c.compartments[c.offset][0] == 0x00 {
c.compartments[c.offset] = c.compartments[c.offset][1:]
c.err = errors.New(string(c.CompileData()))
c.compartments = nil
}
}
// HasError returns wether or not the container is holding an error.
func (c *Container) HasError() bool {
return c.err != nil
}
// Error returns the error.
func (c *Container) Error() error {
return c.err
}
// ErrString returns the error as a string.
func (c *Container) ErrString() string {
return c.err.Error()
}
*/
// Block Handling
// PrependLength prepends the current full length of all bytes in the container.
@ -273,7 +233,8 @@ func (c *Container) PrependLength() {
c.Prepend(varint.Pack64(uint64(c.Length())))
}
func (c *Container) gather(n int) []byte {
// Peek returns the given amount of bytes. Data MAY be copied and IS NOT consumed.
func (c *Container) Peek(n int) []byte {
// Check requested length.
if n <= 0 {
return nil
@ -300,7 +261,8 @@ func (c *Container) gather(n int) []byte {
return slice[:n]
}
func (c *Container) gatherAsContainer(n int) (new *Container) {
// PeekContainer returns the given amount of bytes in a new container. Data will NOT be copied and IS NOT consumed.
func (c *Container) PeekContainer(n int) (newC *Container) {
// Check requested length.
if n < 0 {
return nil
@ -308,20 +270,20 @@ func (c *Container) gatherAsContainer(n int) (new *Container) {
return &Container{}
}
new = &Container{}
newC = &Container{}
for i := c.offset; i < len(c.compartments); i++ {
if n >= len(c.compartments[i]) {
new.compartments = append(new.compartments, c.compartments[i])
newC.compartments = append(newC.compartments, c.compartments[i])
n -= len(c.compartments[i])
} else {
new.compartments = append(new.compartments, c.compartments[i][:n])
newC.compartments = append(newC.compartments, c.compartments[i][:n])
n = 0
}
}
if n > 0 {
return nil
}
return new
return newC
}
func (c *Container) skip(n int) {
@ -363,7 +325,7 @@ func (c *Container) GetNextBlockAsContainer() (*Container, error) {
// GetNextN8 parses and returns a varint of type uint8.
func (c *Container) GetNextN8() (uint8, error) {
buf := c.gather(2)
buf := c.Peek(2)
num, n, err := varint.Unpack8(buf)
if err != nil {
return 0, err
@ -374,7 +336,7 @@ func (c *Container) GetNextN8() (uint8, error) {
// GetNextN16 parses and returns a varint of type uint16.
func (c *Container) GetNextN16() (uint16, error) {
buf := c.gather(3)
buf := c.Peek(3)
num, n, err := varint.Unpack16(buf)
if err != nil {
return 0, err
@ -385,7 +347,7 @@ func (c *Container) GetNextN16() (uint16, error) {
// GetNextN32 parses and returns a varint of type uint32.
func (c *Container) GetNextN32() (uint32, error) {
buf := c.gather(5)
buf := c.Peek(5)
num, n, err := varint.Unpack32(buf)
if err != nil {
return 0, err
@ -396,7 +358,7 @@ func (c *Container) GetNextN32() (uint32, error) {
// GetNextN64 parses and returns a varint of type uint64.
func (c *Container) GetNextN64() (uint64, error) {
buf := c.gather(10)
buf := c.Peek(10)
num, n, err := varint.Unpack64(buf)
if err != nil {
return 0, err

View file
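The previously internal gather and gatherAsContainer helpers are exported here as Peek and PeekContainer, which read without consuming, in contrast to Get. A quick sketch of the difference:

	// import assumed: "github.com/safing/portbase/container"
	c := container.New([]byte("hello world"))
	head := c.Peek(5)     // "hello", data stays in the container
	same, err := c.Get(5) // "hello", data is consumed
	if err != nil {
		// not enough data in the container
	}
	_, _ = head, same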

@ -23,6 +23,7 @@ var (
)
func TestContainerDataHandling(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
c1c := c1.carbonCopy()
@ -65,15 +66,17 @@ func TestContainerDataHandling(t *testing.T) {
}
c8.clean()
c9 := c8.gatherAsContainer(len(testData))
c9 := c8.PeekContainer(len(testData))
c10 := c9.gatherAsContainer(len(testData) - 1)
c10 := c9.PeekContainer(len(testData) - 1)
c10.Append(testData[len(testData)-1:])
compareMany(t, testData, c1.CompileData(), c2.CompileData(), c3.CompileData(), d4, d5, c6.CompileData(), c7.CompileData(), c8.CompileData(), c9.CompileData(), c10.CompileData())
}
func compareMany(t *testing.T, reference []byte, other ...[]byte) {
t.Helper()
for i, cmp := range other {
if !bytes.Equal(reference, cmp) {
t.Errorf("sample %d does not match reference: sample is '%s'", i+1, string(cmp))
@ -82,6 +85,8 @@ func compareMany(t *testing.T, reference []byte, other ...[]byte) {
}
func TestDataFetching(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
data := c1.GetMax(1)
if string(data[0]) != "T" {
@ -100,6 +105,8 @@ func TestDataFetching(t *testing.T) {
}
func TestBlocks(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
c1.PrependLength()
@ -137,10 +144,10 @@ func TestBlocks(t *testing.T) {
if n4 != 43 {
t.Errorf("n should be 43, was %d", n4)
}
}
func TestContainerBlockHandling(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
c1.PrependLength()
@ -185,6 +192,8 @@ func TestContainerBlockHandling(t *testing.T) {
}
func TestContainerMisc(t *testing.T) {
t.Parallel()
c1 := New()
d1 := c1.CompileData()
if len(d1) > 0 {
@ -193,5 +202,7 @@ func TestContainerMisc(t *testing.T) {
}
func TestDeprecated(t *testing.T) {
t.Parallel()
NewContainer(utils.DuplicateBytes(testData))
}

View file

@ -5,23 +5,22 @@
// Byte slices added to the Container are not changed or appended, to not corrupt any other data that may be before and after the given slice.
// If interested, consider the following example to understand why this is important:
//
// package main
// package main
//
// import (
// "fmt"
// )
// import (
// "fmt"
// )
//
// func main() {
// a := []byte{0, 1,2,3,4,5,6,7,8,9}
// fmt.Printf("a: %+v\n", a)
// fmt.Printf("\nmaking changes...\n(we are not changing a directly)\n\n")
// b := a[2:6]
// c := append(b, 10, 11)
// fmt.Printf("b: %+v\n", b)
// fmt.Printf("c: %+v\n", c)
// fmt.Printf("a: %+v\n", a)
// }
// func main() {
// a := []byte{0, 1,2,3,4,5,6,7,8,9}
// fmt.Printf("a: %+v\n", a)
// fmt.Printf("\nmaking changes...\n(we are not changing a directly)\n\n")
// b := a[2:6]
// c := append(b, 10, 11)
// fmt.Printf("b: %+v\n", b)
// fmt.Printf("c: %+v\n", c)
// fmt.Printf("a: %+v\n", a)
// }
//
// run it here: https://play.golang.org/p/xu1BXT3QYeE
//
package container

View file

@ -27,11 +27,11 @@ func (ja *JSONBytesAccessor) Set(key string, value interface{}) error {
}
}
new, err := sjson.SetBytes(*ja.json, key, value)
newJSON, err := sjson.SetBytes(*ja.json, key, value)
if err != nil {
return err
}
*ja.json = new
*ja.json = newJSON
return nil
}
@ -60,15 +60,15 @@ func (ja *JSONBytesAccessor) GetStringArray(key string) (value []string, ok bool
return nil, false
}
slice := result.Array()
new := make([]string, len(slice))
sliceCopy := make([]string, len(slice))
for i, res := range slice {
if res.Type == gjson.String {
new[i] = res.String()
sliceCopy[i] = res.String()
} else {
return nil, false
}
}
return new, true
return sliceCopy, true
}
// GetInt returns the int found by the given json key and whether it could be successfully extracted.

View file

@ -29,11 +29,11 @@ func (ja *JSONAccessor) Set(key string, value interface{}) error {
}
}
new, err := sjson.Set(*ja.json, key, value)
newJSON, err := sjson.Set(*ja.json, key, value)
if err != nil {
return err
}
*ja.json = new
*ja.json = newJSON
return nil
}
@ -84,15 +84,15 @@ func (ja *JSONAccessor) GetStringArray(key string) (value []string, ok bool) {
return nil, false
}
slice := result.Array()
new := make([]string, len(slice))
sliceCopy := make([]string, len(slice))
for i, res := range slice {
if res.Type == gjson.String {
new[i] = res.String()
sliceCopy[i] = res.String()
} else {
return nil, false
}
}
return new, true
return sliceCopy, true
}
// GetInt returns the int found by the given json key and whether it could be successfully extracted.

View file

@ -37,12 +37,12 @@ func (sa *StructAccessor) Set(key string, value interface{}) error {
}
// handle special cases
switch field.Kind() {
switch field.Kind() { // nolint:exhaustive
// ints
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var newInt int64
switch newVal.Kind() {
switch newVal.Kind() { // nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
newInt = newVal.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -58,7 +58,7 @@ func (sa *StructAccessor) Set(key string, value interface{}) error {
// uints
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
var newUint uint64
switch newVal.Kind() {
switch newVal.Kind() { // nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
newUint = uint64(newVal.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -73,7 +73,7 @@ func (sa *StructAccessor) Set(key string, value interface{}) error {
// floats
case reflect.Float32, reflect.Float64:
switch newVal.Kind() {
switch newVal.Kind() { // nolint:exhaustive
case reflect.Float32, reflect.Float64:
field.SetFloat(newVal.Float())
default:
@ -124,7 +124,7 @@ func (sa *StructAccessor) GetInt(key string) (value int64, ok bool) {
if !field.IsValid() {
return 0, false
}
switch field.Kind() {
switch field.Kind() { // nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return field.Int(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -140,7 +140,7 @@ func (sa *StructAccessor) GetFloat(key string) (value float64, ok bool) {
if !field.IsValid() {
return 0, false
}
switch field.Kind() {
switch field.Kind() { // nolint:exhaustive
case reflect.Float32, reflect.Float64:
return field.Float(), true
default:

View file

@ -13,8 +13,6 @@ type Accessor interface {
GetFloat(key string) (value float64, ok bool)
GetBool(key string) (value bool, ok bool)
Exists(key string) bool
Set(key string, value interface{}) error
Type() string
}

View file

@ -44,11 +44,13 @@ var (
F64: 42.42,
B: true,
}
testJSONBytes, _ = json.Marshal(testStruct)
testJSONBytes, _ = json.Marshal(testStruct) //nolint:errchkjson
testJSON = string(testJSONBytes)
)
func testGetString(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue string) {
t.Helper()
v, ok := acc.GetString(key)
switch {
case !ok && shouldSucceed:
@ -62,6 +64,8 @@ func testGetString(t *testing.T, acc Accessor, key string, shouldSucceed bool, e
}
func testGetStringArray(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue []string) {
t.Helper()
v, ok := acc.GetStringArray(key)
switch {
case !ok && shouldSucceed:
@ -75,6 +79,8 @@ func testGetStringArray(t *testing.T, acc Accessor, key string, shouldSucceed bo
}
func testGetInt(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue int64) {
t.Helper()
v, ok := acc.GetInt(key)
switch {
case !ok && shouldSucceed:
@ -88,6 +94,8 @@ func testGetInt(t *testing.T, acc Accessor, key string, shouldSucceed bool, expe
}
func testGetFloat(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue float64) {
t.Helper()
v, ok := acc.GetFloat(key)
switch {
case !ok && shouldSucceed:
@ -101,6 +109,8 @@ func testGetFloat(t *testing.T, acc Accessor, key string, shouldSucceed bool, ex
}
func testGetBool(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue bool) {
t.Helper()
v, ok := acc.GetBool(key)
switch {
case !ok && shouldSucceed:
@ -114,6 +124,8 @@ func testGetBool(t *testing.T, acc Accessor, key string, shouldSucceed bool, exp
}
func testExists(t *testing.T, acc Accessor, key string, shouldSucceed bool) {
t.Helper()
ok := acc.Exists(key)
switch {
case !ok && shouldSucceed:
@ -124,6 +136,8 @@ func testExists(t *testing.T, acc Accessor, key string, shouldSucceed bool) {
}
func testSet(t *testing.T, acc Accessor, key string, shouldSucceed bool, valueToSet interface{}) {
t.Helper()
err := acc.Set(key, valueToSet)
switch {
case err != nil && shouldSucceed:
@ -134,8 +148,9 @@ func testSet(t *testing.T, acc Accessor, key string, shouldSucceed bool, valueTo
}
func TestAccessor(t *testing.T) {
t.Parallel()
// Test interface compliance
// Test interface compliance.
accs := []Accessor{
NewJSONAccessor(&testJSON),
NewJSONBytesAccessor(&testJSONBytes),
@ -273,5 +288,4 @@ func TestAccessor(t *testing.T) {
for _, acc := range accs {
testExists(t, acc, "X", false)
}
}

View file

@ -15,12 +15,10 @@ type Example struct {
Score int
}
var (
exampleDB = NewInterface(&Options{
Internal: true,
Local: true,
})
)
var exampleDB = NewInterface(&Options{
Internal: true,
Local: true,
})
// GetExample gets an Example from the database.
func GetExample(key string) (*Example, error) {
@ -32,20 +30,20 @@ func GetExample(key string) (*Example, error) {
// unwrap
if r.IsWrapped() {
// only allocate a new struct, if we need it
new := &Example{}
err = record.Unwrap(r, new)
newExample := &Example{}
err = record.Unwrap(r, newExample)
if err != nil {
return nil, err
}
return new, nil
return newExample, nil
}
// or adjust type
new, ok := r.(*Example)
newExample, ok := r.(*Example)
if !ok {
return nil, fmt.Errorf("record not of type *Example, but %T", r)
}
return new, nil
return newExample, nil
}
func (e *Example) Save() error {
@ -58,10 +56,10 @@ func (e *Example) SaveAs(key string) error {
}
func NewExample(key, name string, score int) *Example {
new := &Example{
newExample := &Example{
Name: name,
Score: score,
}
new.SetKey(key)
return new
newExample.SetKey(key)
return newExample
}

View file

@ -14,6 +14,7 @@ import (
// A Controller takes care of all the extra database logic.
type Controller struct {
database *Database
storage storage.Interface
shadowDelete bool
@ -25,8 +26,9 @@ type Controller struct {
}
// newController creates a new controller for a storage.
func newController(storageInt storage.Interface, shadowDelete bool) *Controller {
func newController(database *Database, storageInt storage.Interface, shadowDelete bool) *Controller {
return &Controller{
database: database,
storage: storageInt,
shadowDelete: shadowDelete,
}
@ -76,7 +78,7 @@ func (c *Controller) Get(key string) (record.Record, error) {
return r, nil
}
// Get returns the metadata of the record with the given key.
// GetMeta returns the metadata of the record with the given key.
func (c *Controller) GetMeta(key string) (*record.Meta, error) {
if shuttingDown.IsSet() {
return nil, ErrShuttingDown

View file

@ -8,6 +8,9 @@ import (
"github.com/safing/portbase/database/storage"
)
// StorageTypeInjected is the type of injected databases.
const StorageTypeInjected = "injected"
var (
controllers = make(map[string]*Controller)
controllersLock sync.RWMutex
@ -36,22 +39,27 @@ func getController(name string) (*Controller, error) {
// get db registration
registeredDB, err := getDatabase(name)
if err != nil {
return nil, fmt.Errorf(`could not start database %s: %s`, name, err)
return nil, fmt.Errorf("could not start database %s: %w", name, err)
}
// Check if database is injected.
if registeredDB.StorageType == StorageTypeInjected {
return nil, fmt.Errorf("database storage is not injected")
}
// get location
dbLocation, err := getLocation(name, registeredDB.StorageType)
if err != nil {
return nil, fmt.Errorf(`could not start database %s (type %s): %s`, name, registeredDB.StorageType, err)
return nil, fmt.Errorf("could not start database %s (type %s): %w", name, registeredDB.StorageType, err)
}
// start database
storageInt, err := storage.StartDatabase(name, registeredDB.StorageType, dbLocation)
if err != nil {
return nil, fmt.Errorf(`could not start database %s (type %s): %s`, name, registeredDB.StorageType, err)
return nil, fmt.Errorf("could not start database %s (type %s): %w", name, registeredDB.StorageType, err)
}
controller = newController(storageInt, registeredDB.ShadowDelete)
controller = newController(registeredDB, storageInt, registeredDB.ShadowDelete)
controllers[name] = controller
return controller, nil
}
@ -76,13 +84,23 @@ func InjectDatabase(name string, storageInt storage.Interface) (*Controller, err
// check if database is registered
registeredDB, ok := registry[name]
if !ok {
return nil, fmt.Errorf(`database "%s" not registered`, name)
return nil, fmt.Errorf("database %q not registered", name)
}
if registeredDB.StorageType != "injected" {
return nil, fmt.Errorf(`database not of type "injected"`)
if registeredDB.StorageType != StorageTypeInjected {
return nil, fmt.Errorf("database not of type %q", StorageTypeInjected)
}
controller := newController(storageInt, false)
controller := newController(registeredDB, storageInt, false)
controllers[name] = controller
return controller, nil
}
// Withdraw withdraws an injected database, but leaves the database registered.
func (c *Controller) Withdraw() {
if c != nil && c.Injected() {
controllersLock.Lock()
defer controllersLock.Unlock()
delete(controllers, c.database.Name)
}
}

View file
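With the new StorageTypeInjected constant and Controller.Withdraw, an injected database can be attached and detached at runtime. A hedged sketch; the database name and the storage implementation (myStorage) are assumptions, and the database must already be registered with StorageType set to database.StorageTypeInjected:

	// import assumed: "github.com/safing/portbase/database"
	ctrl, err := database.InjectDatabase("runtime", myStorage) // myStorage implements storage.Interface
	if err != nil {
		return err
	}
	// Detach the controller again later; the registration itself stays in place.
	defer ctrl.Withdraw()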

@ -4,7 +4,7 @@ import (
"time"
)
// Database holds information about registered databases
// Database holds information about a registered database.
type Database struct {
Name string
Description string

View file

@ -2,8 +2,8 @@ package database
import (
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"reflect"
@ -11,11 +11,9 @@ import (
"testing"
"time"
"github.com/safing/portbase/database/storage"
q "github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/database/storage"
_ "github.com/safing/portbase/database/storage/badger"
_ "github.com/safing/portbase/database/storage/bbolt"
_ "github.com/safing/portbase/database/storage/fstree"
@ -23,7 +21,7 @@ import (
)
func TestMain(m *testing.M) {
testDir, err := ioutil.TempDir("", "portbase-database-testing-")
testDir, err := os.MkdirTemp("", "portbase-database-testing-")
if err != nil {
panic(err)
}
@ -37,7 +35,7 @@ func TestMain(m *testing.M) {
// Clean up the test directory.
// Do not defer, as we end this function with a os.Exit call.
os.RemoveAll(testDir)
_ = os.RemoveAll(testDir)
os.Exit(exitCode)
}
@ -46,7 +44,7 @@ func makeKey(dbName, key string) string {
return fmt.Sprintf("%s:%s", dbName, key)
}
func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolint:gocognit,gocyclo
func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolint:maintidx,thelper
t.Run(fmt.Sprintf("TestStorage_%s_%v", storageType, shadowDelete), func(t *testing.T) {
dbName := fmt.Sprintf("testing-%s-%v", storageType, shadowDelete)
fmt.Println(dbName)
@ -180,7 +178,7 @@ func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolin
// check status individually
_, err = dbController.storage.Get("A")
if err != storage.ErrNotFound {
if !errors.Is(err, storage.ErrNotFound) {
t.Errorf("A should be deleted and purged, err=%s", err)
}
B1, err := dbController.storage.Get("B")
@ -208,13 +206,13 @@ func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolin
B2, err := dbController.storage.Get("B")
if err == nil {
t.Errorf("B should be deleted and purged, meta: %+v", B2.Meta())
} else if err != storage.ErrNotFound {
} else if !errors.Is(err, storage.ErrNotFound) {
t.Errorf("B should be deleted and purged, err=%s", err)
}
C2, err := dbController.storage.Get("C")
if err == nil {
t.Errorf("C should be deleted and purged, meta: %+v", C2.Meta())
} else if err != storage.ErrNotFound {
} else if !errors.Is(err, storage.ErrNotFound) {
t.Errorf("C should be deleted and purged, err=%s", err)
}
@ -233,11 +231,11 @@ func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolin
if err != nil {
t.Fatal(err)
}
})
}
func TestDatabaseSystem(t *testing.T) {
func TestDatabaseSystem(t *testing.T) { //nolint:tparallel
t.Parallel()
// panic after 10 seconds, to check for locks
finished := make(chan struct{})
@ -282,6 +280,8 @@ func TestDatabaseSystem(t *testing.T) {
}
func countRecords(t *testing.T, db *Interface, query *q.Query) int {
t.Helper()
_, err := query.Check()
if err != nil {
t.Fatal(err)

View file

@ -1,63 +1,62 @@
/*
Package database provides a universal interface for interacting with the database.
A Lazy Database
# A Lazy Database
The database system can handle Go structs as well as serialized data by the dsd package.
While data is in transit within the system, it does not know which form it currently has. Only when it reaches its destination, it must ensure that it is either of a certain type or dump it.
Record Interface
# Record Interface
The database system uses the Record interface to transparently handle all types of structs that get saved in the database. Structs include the Base struct to fulfill most parts of the Record interface.
Boilerplate Code:
type Example struct {
record.Base
sync.Mutex
type Example struct {
record.Base
sync.Mutex
Name string
Score int
}
Name string
Score int
}
var (
db = database.NewInterface(nil)
)
var (
db = database.NewInterface(nil)
)
// GetExample gets an Example from the database.
func GetExample(key string) (*Example, error) {
r, err := db.Get(key)
if err != nil {
return nil, err
}
// GetExample gets an Example from the database.
func GetExample(key string) (*Example, error) {
r, err := db.Get(key)
if err != nil {
return nil, err
}
// unwrap
if r.IsWrapped() {
// only allocate a new struct, if we need it
new := &Example{}
err = record.Unwrap(r, new)
if err != nil {
return nil, err
}
return new, nil
}
// unwrap
if r.IsWrapped() {
// only allocate a new struct, if we need it
new := &Example{}
err = record.Unwrap(r, new)
if err != nil {
return nil, err
}
return new, nil
}
// or adjust type
new, ok := r.(*Example)
if !ok {
return nil, fmt.Errorf("record not of type *Example, but %T", r)
}
return new, nil
}
// or adjust type
new, ok := r.(*Example)
if !ok {
return nil, fmt.Errorf("record not of type *Example, but %T", r)
}
return new, nil
}
func (e *Example) Save() error {
return db.Put(e)
}
func (e *Example) SaveAs(key string) error {
e.SetKey(key)
return db.PutNew(e)
}
func (e *Example) Save() error {
return db.Put(e)
}
func (e *Example) SaveAs(key string) error {
e.SetKey(key)
return db.PutNew(e)
}
*/
package database

View file

@ -4,7 +4,7 @@ import (
"errors"
)
// Errors
// Errors.
var (
ErrNotFound = errors.New("database entry not found")
ErrPermissionDenied = errors.New("access to database record denied")

View file

@ -5,8 +5,7 @@ import (
)
// HookBase implements the Hook interface and provides dummy functions to reduce boilerplate.
type HookBase struct {
}
type HookBase struct{}
// UsesPreGet implements the Hook interface and returns false.
func (b *HookBase) UsesPreGet() bool {

View file

@ -120,19 +120,19 @@ func NewInterface(opts *Options) *Interface {
opts = &Options{}
}
new := &Interface{
newIface := &Interface{
options: opts,
}
if opts.CacheSize > 0 {
cacheBuilder := gcache.New(opts.CacheSize).ARC()
if opts.DelayCachedWrites != "" {
cacheBuilder.EvictedFunc(new.cacheEvictHandler)
new.writeCache = make(map[string]record.Record, opts.CacheSize/2)
new.triggerCacheWrite = make(chan struct{})
cacheBuilder.EvictedFunc(newIface.cacheEvictHandler)
newIface.writeCache = make(map[string]record.Record, opts.CacheSize/2)
newIface.triggerCacheWrite = make(chan struct{})
}
new.cache = cacheBuilder.Build()
newIface.cache = cacheBuilder.Build()
}
return new
return newIface
}
// Exists return whether a record with the given key exists.
@ -157,7 +157,7 @@ func (i *Interface) Get(key string) (record.Record, error) {
return r, err
}
func (i *Interface) getRecord(dbName string, dbKey string, mustBeWriteable bool) (r record.Record, db *Controller, err error) {
func (i *Interface) getRecord(dbName string, dbKey string, mustBeWriteable bool) (r record.Record, db *Controller, err error) { //nolint:unparam
if dbName == "" {
dbName, dbKey = record.ParseKey(dbKey)
}
@ -201,7 +201,7 @@ func (i *Interface) getRecord(dbName string, dbKey string, mustBeWriteable bool)
return r, db, nil
}
func (i *Interface) getMeta(dbName string, dbKey string, mustBeWriteable bool) (m *record.Meta, db *Controller, err error) {
func (i *Interface) getMeta(dbName string, dbKey string, mustBeWriteable bool) (m *record.Meta, db *Controller, err error) { //nolint:unparam
if dbName == "" {
dbName, dbKey = record.ParseKey(dbKey)
}
@ -258,7 +258,7 @@ func (i *Interface) InsertValue(key string, attribute string, value interface{})
err = acc.Set(attribute, value)
if err != nil {
return fmt.Errorf("failed to set value with %s: %s", acc.Type(), err)
return fmt.Errorf("failed to set value with %s: %w", acc.Type(), err)
}
i.options.Apply(r)
@ -271,7 +271,7 @@ func (i *Interface) Put(r record.Record) (err error) {
var db *Controller
if !i.options.HasAllPermissions() {
_, db, err = i.getMeta(r.DatabaseName(), r.DatabaseKey(), true)
if err != nil && err != ErrNotFound {
if err != nil && !errors.Is(err, ErrNotFound) {
return err
}
} else {
@ -309,7 +309,7 @@ func (i *Interface) PutNew(r record.Record) (err error) {
var db *Controller
if !i.options.HasAllPermissions() {
_, db, err = i.getMeta(r.DatabaseName(), r.DatabaseKey(), true)
if err != nil && err != ErrNotFound {
if err != nil && !errors.Is(err, ErrNotFound) {
return err
}
} else {
@ -344,11 +344,13 @@ func (i *Interface) PutNew(r record.Record) (err error) {
return db.Put(r)
}
// PutMany stores many records in the database. Warning: This is nearly a direct database access and omits many things:
// PutMany stores many records in the database.
// Warning: This is nearly a direct database access and omits many things:
// - Record locking
// - Hooks
// - Subscriptions
// - Caching
// Use with care.
func (i *Interface) PutMany(dbName string) (put func(record.Record) error) {
interfaceBatch := make(chan record.Record, 100)
@ -519,6 +521,8 @@ func (i *Interface) Delete(key string) error {
}
// Query executes the given query on the database.
// Will not see data that is in the write cache, waiting to be written.
// Use with care when caching is enabled.
func (i *Interface) Query(q *query.Query) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
@ -530,7 +534,7 @@ func (i *Interface) Query(q *query.Query) (*iterator.Iterator, error) {
return nil, err
}
// FIXME:
// TODO: Finish caching system integration.
// Flush the cache before we query the database.
// i.FlushCache()

View file
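NewInterface wires up the read cache and the optional delayed write cache from these options. A sketch of an interface with both enabled; the field values are placeholders, and treating DelayCachedWrites as the name of the database whose writes may be delayed is an assumption:

	// import assumed: "github.com/safing/portbase/database"
	db := database.NewInterface(&database.Options{
		Local:             true,
		Internal:          true,
		CacheSize:         256,     // enables the read cache
		DelayCachedWrites: "cache", // non-empty: enables the write-back cache
	})
	// Note: the DelayedCacheWriter worker must be running for delayed writes to be flushed.
	_ = db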

@ -45,7 +45,7 @@ func (i *Interface) DelayedCacheWriter(ctx context.Context) error {
i.flushWriteCache(0)
case <-thresholdWriteTicker.C:
// Often check if the the write cache has filled up to a certain degree and
// Often check if the write cache has filled up to a certain degree and
// flush it to storage before we start evicting to-be-written entries and
// slow down the hot path again.
i.flushWriteCache(percentThreshold)
@ -57,7 +57,6 @@ func (i *Interface) DelayedCacheWriter(ctx context.Context) error {
// of a total crash.
i.flushWriteCache(0)
}
}
}

View file

@ -8,7 +8,7 @@ import (
"testing"
)
func benchmarkCacheWriting(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo
func benchmarkCacheWriting(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo,thelper
b.Run(fmt.Sprintf("CacheWriting_%s_%d_%d_%v", storageType, cacheSize, sampleSize, delayWrites), func(b *testing.B) {
// Setup Benchmark.
@ -66,11 +66,10 @@ func benchmarkCacheWriting(b *testing.B, storageType string, cacheSize int, samp
// End cache writer and wait
cancelCtx()
wg.Wait()
})
}
func benchmarkCacheReadWrite(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo
func benchmarkCacheReadWrite(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo,thelper
b.Run(fmt.Sprintf("CacheReadWrite_%s_%d_%d_%v", storageType, cacheSize, sampleSize, delayWrites), func(b *testing.B) {
// Setup Benchmark.
@ -135,7 +134,6 @@ func benchmarkCacheReadWrite(b *testing.B, storageType string, cacheSize int, sa
// End cache writer and wait
cancelCtx()
wg.Wait()
})
}

View file

@ -5,8 +5,9 @@ import (
"fmt"
"path/filepath"
"github.com/safing/portbase/utils"
"github.com/tevino/abool"
"github.com/safing/portbase/utils"
)
const (
@ -25,7 +26,7 @@ var (
// InitializeWithPath initializes the database at the specified location using a path.
func InitializeWithPath(dirPath string) error {
return Initialize(utils.NewDirStructure(dirPath, 0755))
return Initialize(utils.NewDirStructure(dirPath, 0o0755))
}
// Initialize initializes the database at the specified location using a dir structure.
@ -34,16 +35,16 @@ func Initialize(dirStructureRoot *utils.DirStructure) error {
rootStructure = dirStructureRoot
// ensure root and databases dirs
databasesStructure = rootStructure.ChildDir(databasesSubDir, 0700)
databasesStructure = rootStructure.ChildDir(databasesSubDir, 0o0700)
err := databasesStructure.Ensure()
if err != nil {
return fmt.Errorf("could not create/open database directory (%s): %s", rootStructure.Path, err)
return fmt.Errorf("could not create/open database directory (%s): %w", rootStructure.Path, err)
}
if registryPersistence.IsSet() {
err = loadRegistry()
if err != nil {
return fmt.Errorf("could not load database registry (%s): %s", filepath.Join(rootStructure.Path, registryFileName), err)
return fmt.Errorf("could not load database registry (%s): %w", filepath.Join(rootStructure.Path, registryFileName), err)
}
}
@ -74,11 +75,11 @@ func Shutdown() (err error) {
// getLocation returns the storage location for the given name and type.
func getLocation(name, storageType string) (string, error) {
location := databasesStructure.ChildDir(name, 0700).ChildDir(storageType, 0700)
location := databasesStructure.ChildDir(name, 0o0700).ChildDir(storageType, 0o0700)
// check location
err := location.Ensure()
if err != nil {
return "", fmt.Errorf(`failed to create/check database dir "%s": %s`, location.Path, err)
return "", fmt.Errorf(`failed to create/check database dir "%s": %w`, location.Path, err)
}
return location.Path, nil
}

View file

@ -0,0 +1,58 @@
package migration
import "errors"
// DiagnosticStep describes one migration step in the Diagnostics.
type DiagnosticStep struct {
Version string
Description string
}
// Diagnostics holds a detailed error report about a failed migration.
type Diagnostics struct { //nolint:errname
// Message holds a human readable message of the encountered
// error.
Message string
// Wrapped must be set to the underlying error that was encountered
// while preparing or executing migrations.
Wrapped error
// StartOfMigration is set to the version of the database before
// any migrations are applied.
StartOfMigration string
// LastSuccessfulMigration is set to the version of the database
// which has been applied successfully before the error happened.
LastSuccessfulMigration string
// TargetVersion is set to the version of the database that the
// migration run aimed for. That is, it's the last available version
// added to the registry.
TargetVersion string
// ExecutionPlan is a list of migration steps that were planned to
// be executed.
ExecutionPlan []DiagnosticStep
// FailedMigration is the description of the migration that has
// failed.
FailedMigration string
}
// Error returns a string representation of the migration error.
func (err *Diagnostics) Error() string {
msg := ""
if err.FailedMigration != "" {
msg = err.FailedMigration + ": "
}
if err.Message != "" {
msg += err.Message + ": "
}
msg += err.Wrapped.Error()
return msg
}
// Unwrap returns the actual error that happened when executing
// a migration. It implements the interface required by the stdlib
// errors package to support errors.Is() and errors.As().
func (err *Diagnostics) Unwrap() error {
if u := errors.Unwrap(err.Wrapped); u != nil {
return u
}
return err.Wrapped
}

View file
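Because Registry.Migrate (defined in the next file) returns a *Diagnostics on failure, callers can surface the full report instead of just a message. A small sketch; the logging layout is illustrative:

	// imports assumed: "errors", "github.com/safing/portbase/database/migration", "github.com/safing/portbase/log"
	var diag *migration.Diagnostics
	if err := reg.Migrate(ctx); errors.As(err, &diag) {
		log.Errorf("migration %q failed (last ok: %s, target: %s): %s",
			diag.FailedMigration, diag.LastSuccessfulMigration, diag.TargetVersion, diag.Wrapped)
	}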

@ -0,0 +1,220 @@
package migration
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"time"
"github.com/hashicorp/go-version"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log"
)
// MigrateFunc is called when a migration should be applied to the
// database. It receives the current version (from) and the target
// version (to) of the database and a dedicated interface for
// interacting with data stored in the DB.
// A dedicated log.ContextTracer is added to ctx for each migration
// run.
type MigrateFunc func(ctx context.Context, from, to *version.Version, dbInterface *database.Interface) error
// Migration represents a registered data-migration that should be applied to
// some database. Migrations are stacked on top and executed in order of increasing
// version number (see Version field).
type Migration struct {
// Description provides a short human-readable description of the
// migration.
Description string
// Version should hold the version of the database/subsystem after
// the migration has been applied.
Version string
// MigrateFunc is executed when the migration should be performed.
MigrateFunc MigrateFunc
}
// Registry holds a migration stack.
type Registry struct {
key string
lock sync.Mutex
migrations []Migration
}
// New creates a new migration registry.
// The key should be the name of the database key that is used to store
// the version of the last successfully applied migration.
func New(key string) *Registry {
return &Registry{
key: key,
}
}
// Add adds one or more migrations to reg.
func (reg *Registry) Add(migrations ...Migration) error {
reg.lock.Lock()
defer reg.lock.Unlock()
for _, m := range migrations {
if _, err := version.NewSemver(m.Version); err != nil {
return fmt.Errorf("migration %q: invalid version %s: %w", m.Description, m.Version, err)
}
reg.migrations = append(reg.migrations, m)
}
return nil
}
// Migrate migrates the database by executing all registered
// migration in order of increasing version numbers. The error
// returned, if not nil, is always of type *Diagnostics.
func (reg *Registry) Migrate(ctx context.Context) (err error) {
reg.lock.Lock()
defer reg.lock.Unlock()
start := time.Now()
log.Infof("migration: migration of %s started", reg.key)
defer func() {
if err != nil {
log.Errorf("migration: migration of %s failed after %s: %s", reg.key, time.Since(start), err)
} else {
log.Infof("migration: migration of %s finished after %s", reg.key, time.Since(start))
}
}()
db := database.NewInterface(&database.Options{
Local: true,
Internal: true,
})
startOfMigration, err := reg.getLatestSuccessfulMigration(db)
if err != nil {
return err
}
execPlan, diag, err := reg.getExecutionPlan(startOfMigration)
if err != nil {
return err
}
if len(execPlan) == 0 {
return nil
}
diag.TargetVersion = execPlan[len(execPlan)-1].Version
// finally, apply our migrations
lastAppliedMigration := startOfMigration
for _, m := range execPlan {
target, _ := version.NewSemver(m.Version) // we can safely ignore the error here
migrationCtx, tracer := log.AddTracer(ctx)
if err := m.MigrateFunc(migrationCtx, lastAppliedMigration, target, db); err != nil {
diag.Wrapped = err
diag.FailedMigration = m.Description
tracer.Errorf("migration: migration for %s failed: %s - %s", reg.key, target.String(), m.Description)
tracer.Submit()
return diag
}
lastAppliedMigration = target
diag.LastSuccessfulMigration = lastAppliedMigration.String()
if err := reg.saveLastSuccessfulMigration(db, target); err != nil {
diag.Message = "failed to persist migration status"
diag.Wrapped = err
diag.FailedMigration = m.Description
}
tracer.Infof("migration: applied migration for %s: %s - %s", reg.key, target.String(), m.Description)
tracer.Submit()
}
// all migrations have been applied successfully, we're done here
return nil
}
func (reg *Registry) getLatestSuccessfulMigration(db *database.Interface) (*version.Version, error) {
// find the latest version stored in the database
rec, err := db.Get(reg.key)
if errors.Is(err, database.ErrNotFound) {
return nil, nil
}
if err != nil {
return nil, &Diagnostics{
Message: "failed to query database for migration status",
Wrapped: err,
}
}
// Unwrap the record to get the actual data.
r, ok := rec.(*record.Wrapper)
if !ok {
return nil, &Diagnostics{
Wrapped: errors.New("expected wrapped database record"),
}
}
sv, err := version.NewSemver(string(r.Data))
if err != nil {
return nil, &Diagnostics{
Message: "failed to parse version stored in migration status record",
Wrapped: err,
}
}
return sv, nil
}
func (reg *Registry) saveLastSuccessfulMigration(db *database.Interface, ver *version.Version) error {
r := &record.Wrapper{
Data: []byte(ver.String()),
Format: dsd.RAW,
}
r.SetKey(reg.key)
return db.Put(r)
}
func (reg *Registry) getExecutionPlan(startOfMigration *version.Version) ([]Migration, *Diagnostics, error) {
// Create a look-up map for migrations indexed by their semver and a
// list of versions (sorted in increasing order) that we use as our execution
// plan.
lm := make(map[string]Migration)
versions := make(version.Collection, 0, len(reg.migrations))
for _, m := range reg.migrations {
ver, err := version.NewSemver(m.Version)
if err != nil {
return nil, nil, &Diagnostics{
Message: "failed to parse version of migration",
Wrapped: err,
FailedMigration: m.Description,
}
}
lm[ver.String()] = m // use .String() for a normalized string representation
versions = append(versions, ver)
}
sort.Sort(versions)
diag := new(Diagnostics)
if startOfMigration != nil {
diag.StartOfMigration = startOfMigration.String()
}
// prepare our diagnostics and the execution plan
execPlan := make([]Migration, 0, len(versions))
for _, ver := range versions {
// skip any migration that has already been applied.
if startOfMigration != nil && startOfMigration.GreaterThanOrEqual(ver) {
continue
}
m := lm[ver.String()]
diag.ExecutionPlan = append(diag.ExecutionPlan, DiagnosticStep{
Description: m.Description,
Version: ver.String(),
})
execPlan = append(execPlan, m)
}
return execPlan, diag, nil
}
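For orientation, here is a minimal sketch of how a migration entry driven by the loop above might be registered. The MigrateFunc signature is inferred from the call site in Migrate (context, last applied version, target version, database interface); the New constructor and Add method are assumptions made purely for illustration and may differ in the actual package.
// Sketch only: registering a hypothetical migration with the registry above.
// New and Add are assumptions; MigrateFunc mirrors the call site in Migrate.
func registerExampleMigration() error {
	reg := New("core:migrations/my-module") // hypothetical constructor and key
	return reg.Add(Migration{
		Description: "rename legacy config keys",
		Version:     "1.2.0",
		MigrateFunc: func(ctx context.Context, from, to *version.Version, db *database.Interface) error {
			// from is nil on a fresh install; to is the parsed target version.
			// Read, transform and write records via db here.
			return nil
		},
	})
}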

View file

@ -15,7 +15,6 @@ type boolCondition struct {
}
func newBoolCondition(key string, operator uint8, value interface{}) *boolCondition {
var parsedValue bool
switch v := value.(type) {

View file

@ -15,7 +15,6 @@ type floatCondition struct {
}
func newFloatCondition(key string, operator uint8, value interface{}) *floatCondition {
var parsedValue float64
switch v := value.(type) {

View file

@ -15,7 +15,6 @@ type intCondition struct {
}
func newIntCondition(key string, operator uint8, value interface{}) *intCondition {
var parsedValue int64
switch v := value.(type) {

View file

@ -15,7 +15,6 @@ type stringSliceCondition struct {
}
func newStringSliceCondition(key string, operator uint8, value interface{}) *stringSliceCondition {
switch v := value.(type) {
case string:
parsedValue := strings.Split(v, ",")
@ -42,7 +41,6 @@ func newStringSliceCondition(key string, operator uint8, value interface{}) *str
operator: errorPresent,
}
}
}
func (c *stringSliceCondition) complies(acc accessor.Accessor) bool {

View file

@ -13,7 +13,7 @@ type Condition interface {
string() string
}
// Operators
// Operators.
const (
Equals uint8 = iota // int
GreaterThan // int

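As a quick illustration of the operator constants above, here is a hedged sketch of how conditions are combined in this package, mirroring the helpers (Where, And, Exists, New, MustBeValid) that appear in the tests further down in this diff; the exact exported surface may differ.
// Sketch: combining conditions with the operators defined above.
func exampleConditions() {
	c := And(
		Where("bananas", GreaterThan, 100),
		Where("happy", Exists, nil),
	)
	// Build and validate a query with the condition, as the package tests do.
	q := New("test:").Where(c).MustBeValid()
	_ = q
}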
View file

@ -3,6 +3,8 @@ package query
import "testing"
func testSuccess(t *testing.T, c Condition) {
t.Helper()
err := c.check()
if err != nil {
t.Errorf("failed: %s", err)
@ -10,6 +12,8 @@ func testSuccess(t *testing.T, c Condition) {
}
func TestInterfaces(t *testing.T) {
t.Parallel()
testSuccess(t, newIntCondition("banana", Equals, uint(1)))
testSuccess(t, newIntCondition("banana", Equals, uint8(1)))
testSuccess(t, newIntCondition("banana", Equals, uint16(1)))
@ -41,6 +45,8 @@ func TestInterfaces(t *testing.T) {
}
func testCondError(t *testing.T, c Condition) {
t.Helper()
err := c.check()
if err == nil {
t.Error("should fail")
@ -48,6 +54,8 @@ func testCondError(t *testing.T, c Condition) {
}
func TestConditionErrors(t *testing.T) {
t.Parallel()
// test invalid value types
testCondError(t, newBoolCondition("banana", Is, 1))
testCondError(t, newFloatCondition("banana", FloatEquals, true))
@ -68,6 +76,8 @@ func TestConditionErrors(t *testing.T) {
}
func TestWhere(t *testing.T) {
t.Parallel()
c := Where("", 254, nil)
err := c.check()
if err == nil {

View file

@ -3,6 +3,8 @@ package query
import "testing"
func TestGetOpName(t *testing.T) {
t.Parallel()
if getOpName(254) != "[unknown]" {
t.Error("unexpected output")
}

View file

@ -14,6 +14,7 @@ type snippet struct {
}
// ParseQuery parses a plaintext query. Special characters (that must be escaped with a '\') are: `\()` and any whitespaces.
//
//nolint:gocognit
func ParseQuery(query string) (*Query, error) {
snippets, err := extractSnippets(query)
@ -121,7 +122,6 @@ func ParseQuery(query string) (*Query, error) {
}
func extractSnippets(text string) (snippets []*snippet, err error) {
skip := false
start := -1
inParenthesis := false
@ -193,21 +193,22 @@ func extractSnippets(text string) (snippets []*snippet, err error) {
}
return snippets, nil
}
//nolint:gocognit
func parseAndOr(getSnippet func() (*snippet, error), remainingSnippets func() int, rootCondition bool) (Condition, error) {
var isOr = false
var typeSet = false
var wrapInNot = false
var expectingMore = true
var conditions []Condition
var (
isOr = false
typeSet = false
wrapInNot = false
expectingMore = true
conditions []Condition
)
for {
if !expectingMore && rootCondition && remainingSnippets() == 0 {
// advance snippetsPos by one, as it will be set back by 1
getSnippet() //nolint:errcheck
_, _ = getSnippet()
if len(conditions) == 1 {
return conditions[0], nil
}
@ -331,21 +332,19 @@ func parseCondition(firstSnippet *snippet, getSnippet func() (*snippet, error))
return Where(firstSnippet.text, operator, value.text), nil
}
var (
escapeReplacer = regexp.MustCompile(`\\([^\\])`)
)
var escapeReplacer = regexp.MustCompile(`\\([^\\])`)
// prepToken removes surrounding parenthesis and escape characters.
func prepToken(text string) string {
return escapeReplacer.ReplaceAllString(strings.Trim(text, "\""), "$1")
}
// escapeString correctly escapes a snippet for printing
// escapeString correctly escapes a snippet for printing.
func escapeString(token string) string {
// check if token contains characters that need to be escaped
if strings.ContainsAny(token, "()\"\\\t\r\n ") {
// put the token in parenthesis and only escape \ and "
return fmt.Sprintf("\"%s\"", strings.Replace(token, "\"", "\\\"", -1))
return fmt.Sprintf("\"%s\"", strings.ReplaceAll(token, "\"", "\\\""))
}
return token
}
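Given the escaping rules above, a short hedged sketch of parsing a plaintext query; the query text follows the grammar exercised by the tests below, and Print is taken from a commented-out call in those tests.
// Sketch: parsing a plaintext query string into a *Query.
func exampleParse() {
	q, err := ParseQuery(`query test: where (bananas > 100 and monkeys.# <= 12) orderby name limit 10`)
	if err != nil {
		log.Errorf("failed to parse query: %s", err)
		return
	}
	fmt.Println(q.Print()) // Print as referenced in this package's tests
}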

View file

@ -8,6 +8,8 @@ import (
)
func TestExtractSnippets(t *testing.T) {
t.Parallel()
text1 := `query test: where ( "bananas" > 100 and monkeys.# <= "12")or(coconuts < 10 "and" area > 50) or name sameas Julian or name matches ^King\ `
result1 := []*snippet{
{text: "query", globalPosition: 1},
@ -58,6 +60,8 @@ func TestExtractSnippets(t *testing.T) {
}
func testParsing(t *testing.T, queryText string, expectedResult *Query) {
t.Helper()
_, err := expectedResult.Check()
if err != nil {
t.Errorf("failed to create query: %s", err)
@ -84,6 +88,8 @@ func testParsing(t *testing.T, queryText string, expectedResult *Query) {
}
func TestParseQuery(t *testing.T) {
t.Parallel()
text1 := `query test: where (bananas > 100 and monkeys.# <= 12) or not (coconuts < 10 and area not > 50) or name sameas Julian or name matches "^King " orderby name limit 10 offset 20`
result1 := New("test:").Where(Or(
And(
@ -131,6 +137,8 @@ func TestParseQuery(t *testing.T) {
}
func testParseError(t *testing.T, queryText string, expectedErrorString string) {
t.Helper()
_, err := ParseQuery(queryText)
if err == nil {
t.Errorf("should fail to parse: %s", queryText)
@ -142,6 +150,8 @@ func testParseError(t *testing.T, queryText string, expectedErrorString string)
}
func TestParseErrors(t *testing.T) {
t.Parallel()
// syntax
testParseError(t, `query`, `unexpected end at position 5`)
testParseError(t, `query test: where`, `unexpected end at position 17`)

View file

@ -8,9 +8,8 @@ import (
"github.com/safing/portbase/formats/dsd"
)
var (
// copied from https://github.com/tidwall/gjson/blob/master/gjson_test.go
testJSON = `{"age":100, "name":{"here":"B\\\"R"},
// copied from https://github.com/tidwall/gjson/blob/master/gjson_test.go
var testJSON = `{"age":100, "name":{"here":"B\\\"R"},
"noop":{"what is a wren?":"a bird"},
"happy":true,"immortal":false,
"items":[1,2,3,{"tags":[1,2,3],"points":[[1,2],[3,4]]},4,5,6,7],
@ -46,11 +45,11 @@ var (
"lastly":{"yay":"final"},
"temperature": 120.413
}`
)
func testQuery(t *testing.T, r record.Record, shouldMatch bool, condition Condition) {
q := New("test:").Where(condition).MustBeValid()
t.Helper()
q := New("test:").Where(condition).MustBeValid()
// fmt.Printf("%s\n", q.Print())
matched := q.Matches(r)
@ -63,6 +62,7 @@ func testQuery(t *testing.T, r record.Record, shouldMatch bool, condition Condit
}
func TestQuery(t *testing.T) {
t.Parallel()
// if !gjson.Valid(testJSON) {
// t.Fatal("test json is invalid")
@ -110,5 +110,4 @@ func TestQuery(t *testing.T) {
testQuery(t, r, true, Where("happy", Exists, nil))
testQuery(t, r, true, Where("created", Matches, "^2014-[0-9]{2}-[0-9]{2}T"))
}

View file

@ -44,6 +44,13 @@ func (b *Base) SetKey(key string) {
}
}
// ResetKey resets the database name and key.
// Use with caution!
func (b *Base) ResetKey() {
b.dbName = ""
b.dbKey = ""
}
// Key returns the key of the database record.
// As the key must be set before any usage and can only be set once, this
// function may be used without locking the record.
@ -122,14 +129,14 @@ func (b *Base) MarshalRecord(self Record) ([]byte, error) {
c := container.New([]byte{1})
// meta encoding
metaSection, err := dsd.Dump(b.meta, GenCode)
metaSection, err := dsd.Dump(b.meta, dsd.GenCode)
if err != nil {
return nil, err
}
c.AppendAsBlock(metaSection)
// data
dataSection, err := b.Marshal(self, JSON)
dataSection, err := b.Marshal(self, dsd.JSON)
if err != nil {
return nil, err
}

View file

@ -3,11 +3,11 @@ package record
import "testing"
func TestBaseRecord(t *testing.T) {
t.Parallel()
// check model interface compliance
var m Record
b := &TestRecord{}
m = b
_ = m
}

View file

@ -1,15 +0,0 @@
package record
import (
"github.com/safing/portbase/formats/dsd"
)
// Reimport DSD storage types
const (
AUTO = dsd.AUTO
STRING = dsd.STRING // S
BYTES = dsd.BYTES // X
JSON = dsd.JSON // J
BSON = dsd.BSON // B
GenCode = dsd.GenCode // G
)

View file

@ -24,22 +24,16 @@ import (
"github.com/safing/portbase/container"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/formats/varint"
// Colfer
// "github.com/safing/portbase/database/model/model"
// XDR
// xdr2 "github.com/davecgh/go-xdr/xdr2"
)
var (
testMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
)
var testMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
func BenchmarkAllocateBytes(b *testing.B) {
for i := 0; i < b.N; i++ {
@ -49,8 +43,8 @@ func BenchmarkAllocateBytes(b *testing.B) {
func BenchmarkAllocateStruct1(b *testing.B) {
for i := 0; i < b.N; i++ {
var new Meta
_ = new
var newMeta Meta
_ = newMeta
}
}
@ -61,7 +55,6 @@ func BenchmarkAllocateStruct2(b *testing.B) {
}
func BenchmarkMetaSerializeContainer(b *testing.B) {
// Start benchmark
for i := 0; i < b.N; i++ {
c := container.New()
@ -80,11 +73,9 @@ func BenchmarkMetaSerializeContainer(b *testing.B) {
c.AppendNumber(0)
}
}
}
func BenchmarkMetaUnserializeContainer(b *testing.B) {
// Setup
c := container.New()
c.AppendNumber(uint64(testMeta.Created))
@ -157,11 +148,9 @@ func BenchmarkMetaUnserializeContainer(b *testing.B) {
return
}
}
}
func BenchmarkMetaSerializeVarInt(b *testing.B) {
// Start benchmark
for i := 0; i < b.N; i++ {
encoded := make([]byte, 33)
@ -197,13 +186,10 @@ func BenchmarkMetaSerializeVarInt(b *testing.B) {
default:
encoded[offset] = 0
}
offset++
}
}
func BenchmarkMetaUnserializeVarInt(b *testing.B) {
// Setup
encoded := make([]byte, 33)
offset := 0
@ -295,106 +281,9 @@ func BenchmarkMetaUnserializeVarInt(b *testing.B) {
return
}
}
}
// func BenchmarkMetaSerializeWithXDR2(b *testing.B) {
//
// // Setup
// var w bytes.Buffer
//
// // Reset timer for precise results
// b.ResetTimer()
//
// // Start benchmark
// for i := 0; i < b.N; i++ {
// w.Reset()
// _, err := xdr2.Marshal(&w, testMeta)
// if err != nil {
// b.Errorf("failed to serialize with xdr2: %s", err)
// return
// }
// }
//
// }
// func BenchmarkMetaUnserializeWithXDR2(b *testing.B) {
//
// // Setup
// var w bytes.Buffer
// _, err := xdr2.Marshal(&w, testMeta)
// if err != nil {
// b.Errorf("failed to serialize with xdr2: %s", err)
// }
// encodedData := w.Bytes()
//
// // Reset timer for precise results
// b.ResetTimer()
//
// // Start benchmark
// for i := 0; i < b.N; i++ {
// var newMeta Meta
// _, err := xdr2.Unmarshal(bytes.NewReader(encodedData), &newMeta)
// if err != nil {
// b.Errorf("failed to unserialize with xdr2: %s", err)
// return
// }
// }
//
// }
// func BenchmarkMetaSerializeWithColfer(b *testing.B) {
//
// testColf := &model.Course{
// Created: time.Now().Unix(),
// Modified: time.Now().Unix(),
// Expires: time.Now().Unix(),
// Deleted: time.Now().Unix(),
// Secret: true,
// Cronjewel: true,
// }
//
// // Setup
// for i := 0; i < b.N; i++ {
// _, err := testColf.MarshalBinary()
// if err != nil {
// b.Errorf("failed to serialize with colfer: %s", err)
// return
// }
// }
//
// }
// func BenchmarkMetaUnserializeWithColfer(b *testing.B) {
//
// testColf := &model.Course{
// Created: time.Now().Unix(),
// Modified: time.Now().Unix(),
// Expires: time.Now().Unix(),
// Deleted: time.Now().Unix(),
// Secret: true,
// Cronjewel: true,
// }
// encodedData, err := testColf.MarshalBinary()
// if err != nil {
// b.Errorf("failed to serialize with colfer: %s", err)
// return
// }
//
// // Setup
// for i := 0; i < b.N; i++ {
// var testUnColf model.Course
// err := testUnColf.UnmarshalBinary(encodedData)
// if err != nil {
// b.Errorf("failed to unserialize with colfer: %s", err)
// return
// }
// }
//
// }
func BenchmarkMetaSerializeWithCodegen(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := testMeta.GenCodeMarshal(nil)
if err != nil {
@ -402,11 +291,9 @@ func BenchmarkMetaSerializeWithCodegen(b *testing.B) {
return
}
}
}
func BenchmarkMetaUnserializeWithCodegen(b *testing.B) {
// Setup
encodedData, err := testMeta.GenCodeMarshal(nil)
if err != nil {
@ -426,25 +313,21 @@ func BenchmarkMetaUnserializeWithCodegen(b *testing.B) {
return
}
}
}
func BenchmarkMetaSerializeWithDSDJSON(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := dsd.Dump(testMeta, JSON)
_, err := dsd.Dump(testMeta, dsd.JSON)
if err != nil {
b.Errorf("failed to serialize with DSD/JSON: %s", err)
return
}
}
}
func BenchmarkMetaUnserializeWithDSDJSON(b *testing.B) {
// Setup
encodedData, err := dsd.Dump(testMeta, JSON)
encodedData, err := dsd.Dump(testMeta, dsd.JSON)
if err != nil {
b.Errorf("failed to serialize with DSD/JSON: %s", err)
return
@ -462,5 +345,4 @@ func BenchmarkMetaUnserializeWithDSDJSON(b *testing.B) {
return
}
}
}

View file

@ -2,18 +2,9 @@ package record
import (
"fmt"
"io"
"time"
"unsafe"
)
var (
_ = unsafe.Sizeof(0)
_ = io.ReadFull
_ = time.Now()
)
// GenCodeSize returns the size of the gencode marshalled byte slice
// GenCodeSize returns the size of the gencode marshalled byte slice.
func (m *Meta) GenCodeSize() (s int) {
s += 34
return
@ -133,24 +124,16 @@ func (m *Meta) GenCodeUnmarshal(buf []byte) (uint64, error) {
i := uint64(0)
{
m.Created = 0 | (int64(buf[0+0]) << 0) | (int64(buf[1+0]) << 8) | (int64(buf[2+0]) << 16) | (int64(buf[3+0]) << 24) | (int64(buf[4+0]) << 32) | (int64(buf[5+0]) << 40) | (int64(buf[6+0]) << 48) | (int64(buf[7+0]) << 56)
}
{
m.Modified = 0 | (int64(buf[0+8]) << 0) | (int64(buf[1+8]) << 8) | (int64(buf[2+8]) << 16) | (int64(buf[3+8]) << 24) | (int64(buf[4+8]) << 32) | (int64(buf[5+8]) << 40) | (int64(buf[6+8]) << 48) | (int64(buf[7+8]) << 56)
}
{
m.Expires = 0 | (int64(buf[0+16]) << 0) | (int64(buf[1+16]) << 8) | (int64(buf[2+16]) << 16) | (int64(buf[3+16]) << 24) | (int64(buf[4+16]) << 32) | (int64(buf[5+16]) << 40) | (int64(buf[6+16]) << 48) | (int64(buf[7+16]) << 56)
}
{
m.Deleted = 0 | (int64(buf[0+24]) << 0) | (int64(buf[1+24]) << 8) | (int64(buf[2+24]) << 16) | (int64(buf[3+24]) << 24) | (int64(buf[4+24]) << 32) | (int64(buf[5+24]) << 40) | (int64(buf[6+24]) << 48) | (int64(buf[7+24]) << 56)
}
{
m.secret = buf[32] == 1

View file

@ -6,30 +6,30 @@ import (
"time"
)
var (
genCodeTestMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
)
var genCodeTestMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
func TestGenCode(t *testing.T) {
t.Parallel()
encoded, err := genCodeTestMeta.GenCodeMarshal(nil)
if err != nil {
t.Fatal(err)
}
new := &Meta{}
_, err = new.GenCodeUnmarshal(encoded)
newMeta := &Meta{}
_, err = newMeta.GenCodeUnmarshal(encoded)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(genCodeTestMeta, new) {
t.Errorf("objects are not equal, got: %v", new)
if !reflect.DeepEqual(genCodeTestMeta, newMeta) {
t.Errorf("objects are not equal, got: %v", newMeta)
}
}

View file

@ -2,7 +2,7 @@ package record
import "time"
// Meta holds
// Meta holds metadata about the record.
type Meta struct {
Created int64
Modified int64

View file

@ -12,17 +12,21 @@ type Record interface {
DatabaseName() string // test
DatabaseKey() string // config
// Metadata.
Meta() *Meta
SetMeta(meta *Meta)
CreateMeta()
UpdateMeta()
// Serialization.
Marshal(self Record, format uint8) ([]byte, error)
MarshalRecord(self Record) ([]byte, error)
GetAccessor(self Record) accessor.Accessor
// Locking.
Lock()
Unlock()
// Wrapping.
IsWrapped() bool
}

View file

@ -32,21 +32,21 @@ func NewRawWrapper(database, key string, data []byte) (*Wrapper, error) {
metaSection, n, err := varint.GetNextBlock(data[offset:])
if err != nil {
return nil, fmt.Errorf("could not get meta section: %s", err)
return nil, fmt.Errorf("could not get meta section: %w", err)
}
offset += n
newMeta := &Meta{}
_, err = dsd.Load(metaSection, newMeta)
if err != nil {
return nil, fmt.Errorf("could not unmarshal meta section: %s", err)
return nil, fmt.Errorf("could not unmarshal meta section: %w", err)
}
var format uint8 = dsd.NONE
var format uint8 = dsd.RAW
if !newMeta.IsDeleted() {
format, n, err = varint.Unpack8(data[offset:])
if err != nil {
return nil, fmt.Errorf("could not get dsd format: %s", err)
return nil, fmt.Errorf("could not get dsd format: %w", err)
}
offset += n
}
@ -79,7 +79,7 @@ func NewWrapper(key string, meta *Meta, format uint8, data []byte) (*Wrapper, er
}, nil
}
// Marshal marshals the object, without the database key or metadata
// Marshal marshals the object, without the database key or metadata.
func (w *Wrapper) Marshal(r Record, format uint8) ([]byte, error) {
if w.Meta() == nil {
return nil, errors.New("missing meta")
@ -89,7 +89,7 @@ func (w *Wrapper) Marshal(r Record, format uint8) ([]byte, error) {
return nil, nil
}
if format != AUTO && format != w.Format {
if format != dsd.AUTO && format != w.Format {
return nil, errors.New("could not dump model, wrapped object format mismatch")
}
@ -112,14 +112,14 @@ func (w *Wrapper) MarshalRecord(r Record) ([]byte, error) {
c := container.New([]byte{1})
// meta
metaSection, err := dsd.Dump(w.meta, GenCode)
metaSection, err := dsd.Dump(w.meta, dsd.GenCode)
if err != nil {
return nil, err
}
c.AppendAsBlock(metaSection)
// data
dataSection, err := w.Marshal(r, JSON)
dataSection, err := w.Marshal(r, dsd.AUTO)
if err != nil {
return nil, err
}
@ -134,26 +134,26 @@ func (w *Wrapper) IsWrapped() bool {
}
// Unwrap unwraps data into a record.
func Unwrap(wrapped, new Record) error {
func Unwrap(wrapped, r Record) error {
wrapper, ok := wrapped.(*Wrapper)
if !ok {
return fmt.Errorf("cannot unwrap %T", wrapped)
}
_, err := dsd.LoadAsFormat(wrapper.Data, wrapper.Format, new)
err := dsd.LoadAsFormat(wrapper.Data, wrapper.Format, r)
if err != nil {
return fmt.Errorf("failed to unwrap %T: %s", new, err)
return fmt.Errorf("failed to unwrap %T: %w", r, err)
}
new.SetKey(wrapped.Key())
new.SetMeta(wrapped.Meta())
r.SetKey(wrapped.Key())
r.SetMeta(wrapped.Meta())
return nil
}
// GetAccessor returns an accessor for this record, if available.
func (w *Wrapper) GetAccessor(self Record) accessor.Accessor {
if w.Format == JSON && len(w.Data) > 0 {
if w.Format == dsd.JSON && len(w.Data) > 0 {
return accessor.NewJSONBytesAccessor(&w.Data)
}
return nil
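For orientation, a hedged sketch of the Unwrap helper above in use; MyRecord is an illustrative type embedding Base and sync.Mutex (the same pattern as the TestRecord types elsewhere in this diff), not something defined here.
// Sketch: unwrapping a generic (wrapped) record into a concrete type.
type MyRecord struct { // illustrative only
	Base
	sync.Mutex
	Name string
}

func exampleUnwrap(wrapped Record) (*MyRecord, error) {
	r := &MyRecord{}
	if err := Unwrap(wrapped, r); err != nil {
		return nil, err
	}
	return r, nil
}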

View file

@ -3,9 +3,12 @@ package record
import (
"bytes"
"testing"
"github.com/safing/portbase/formats/dsd"
)
func TestWrapper(t *testing.T) {
t.Parallel()
// check model interface compliance
var m Record
@ -18,18 +21,18 @@ func TestWrapper(t *testing.T) {
encodedTestData := []byte(`J{"a": "b"}`)
// test wrapper
wrapper, err := NewWrapper("test:a", &Meta{}, JSON, testData)
wrapper, err := NewWrapper("test:a", &Meta{}, dsd.JSON, testData)
if err != nil {
t.Fatal(err)
}
if wrapper.Format != JSON {
if wrapper.Format != dsd.JSON {
t.Error("format mismatch")
}
if !bytes.Equal(testData, wrapper.Data) {
t.Error("data mismatch")
}
encoded, err := wrapper.Marshal(wrapper, JSON)
encoded, err := wrapper.Marshal(wrapper, dsd.JSON)
if err != nil {
t.Fatal(err)
}

View file

@ -4,7 +4,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io/fs"
"os"
"path"
"regexp"
@ -32,7 +32,7 @@ var (
// If the database is already registered, only
// the description and the primary API will be
// updated and the effective object will be returned.
func Register(new *Database) (*Database, error) {
func Register(db *Database) (*Database, error) {
if !initialized.IsSet() {
return nil, errors.New("database not initialized")
}
@ -40,31 +40,31 @@ func Register(new *Database) (*Database, error) {
registryLock.Lock()
defer registryLock.Unlock()
registeredDB, ok := registry[new.Name]
registeredDB, ok := registry[db.Name]
save := false
if ok {
// update database
if registeredDB.Description != new.Description {
registeredDB.Description = new.Description
if registeredDB.Description != db.Description {
registeredDB.Description = db.Description
save = true
}
if registeredDB.ShadowDelete != new.ShadowDelete {
registeredDB.ShadowDelete = new.ShadowDelete
if registeredDB.ShadowDelete != db.ShadowDelete {
registeredDB.ShadowDelete = db.ShadowDelete
save = true
}
} else {
// register new database
if !nameConstraint.MatchString(new.Name) {
if !nameConstraint.MatchString(db.Name) {
return nil, errors.New("database name must only contain alphanumeric and `_-` characters and must be at least 3 characters long")
}
now := time.Now().Round(time.Second)
new.Registered = now
new.LastUpdated = now
new.LastLoaded = time.Time{}
db.Registered = now
db.LastUpdated = now
db.LastLoaded = time.Time{}
registry[new.Name] = new
registry[db.Name] = db
save = true
}
@ -115,23 +115,23 @@ func loadRegistry() error {
// read file
filePath := path.Join(rootStructure.Path, registryFileName)
data, err := ioutil.ReadFile(filePath)
data, err := os.ReadFile(filePath)
if err != nil {
if os.IsNotExist(err) {
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
// parse
new := make(map[string]*Database)
err = json.Unmarshal(data, &new)
databases := make(map[string]*Database)
err = json.Unmarshal(data, &databases)
if err != nil {
return err
}
// set
registry = new
registry = databases
return nil
}
@ -150,7 +150,7 @@ func saveRegistry(lock bool) error {
// write file
// TODO: write atomically (best effort)
filePath := path.Join(rootStructure.Path, registryFileName)
return ioutil.WriteFile(filePath, data, 0600)
return os.WriteFile(filePath, data, 0o0600)
}
func registryWriter() {

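A hedged sketch of calling the Register function above from another package; the field names match this diff, Register fails if the database subsystem is not yet initialized (per the check above), and the ShadowDelete semantics noted in the comment are an assumption.
// Sketch: registering a database description with the registry shown above.
func registerMyDB() error {
	db, err := database.Register(&database.Database{
		Name:         "my-module",
		Description:  "records owned by my module",
		ShadowDelete: true, // assumed: keep tombstones instead of hard-deleting
	})
	if err != nil {
		return err
	}
	_ = db
	return nil
}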
View file

@ -30,7 +30,7 @@ func NewBadger(name, location string) (storage.Interface, error) {
opts := badger.DefaultOptions(location)
db, err := badger.Open(opts)
if err == badger.ErrTruncateNeeded {
if errors.Is(err, badger.ErrTruncateNeeded) {
// clean up after crash
log.Warningf("database/storage: truncating corrupted value log of badger database %s: this may cause data loss", name)
opts.Truncate = true
@ -54,7 +54,7 @@ func (b *Badger) Get(key string) (record.Record, error) {
var err error
item, err = txn.Get([]byte(key))
if err != nil {
if err == badger.ErrKeyNotFound {
if errors.Is(err, badger.ErrKeyNotFound) {
return storage.ErrNotFound
}
return err
@ -114,7 +114,7 @@ func (b *Badger) Put(r record.Record) (record.Record, error) {
func (b *Badger) Delete(key string) error {
return b.db.Update(func(txn *badger.Txn) error {
err := txn.Delete([]byte(key))
if err != nil && err != badger.ErrKeyNotFound {
if err != nil && !errors.Is(err, badger.ErrKeyNotFound) {
return err
}
return nil
@ -125,7 +125,7 @@ func (b *Badger) Delete(key string) error {
func (b *Badger) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
queryIter := iterator.New()
@ -169,17 +169,17 @@ func (b *Badger) queryExecutor(queryIter *iterator.Iterator, q *query.Query, loc
if err != nil {
return err
}
new, err := record.NewRawWrapper(b.name, r.DatabaseKey(), copiedData)
newWrapper, err := record.NewRawWrapper(b.name, r.DatabaseKey(), copiedData)
if err != nil {
return err
}
select {
case <-queryIter.Done:
return nil
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
default:
select {
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
case <-queryIter.Done:
return nil
case <-time.After(1 * time.Minute):

View file

@ -1,9 +1,7 @@
//nolint:unparam,maligned
package badger
import (
"context"
"io/ioutil"
"os"
"reflect"
"sync"
@ -20,7 +18,7 @@ var (
_ storage.Maintainer = &Badger{}
)
type TestRecord struct {
type TestRecord struct { //nolint:maligned
record.Base
sync.Mutex
S string
@ -40,11 +38,15 @@ type TestRecord struct {
}
func TestBadger(t *testing.T) {
testDir, err := ioutil.TempDir("", "testing-")
t.Parallel()
testDir, err := os.MkdirTemp("", "testing-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(testDir) // clean up
defer func() {
_ = os.RemoveAll(testDir) // clean up
}()
// start
db, err := NewBadger("test", testDir)

View file

@ -16,9 +16,7 @@ import (
"github.com/safing/portbase/database/storage"
)
var (
bucketName = []byte{0}
)
var bucketName = []byte{0}
// BBolt database made pluggable for portbase.
type BBolt struct {
@ -39,10 +37,10 @@ func NewBBolt(name, location string) (storage.Interface, error) {
}
// Open/Create database, retry if there is a timeout.
db, err := bbolt.Open(dbFile, 0600, dbOptions)
db, err := bbolt.Open(dbFile, 0o0600, dbOptions)
for i := 0; i < 5 && err != nil; i++ {
// Try again if there is an error.
db, err = bbolt.Open(dbFile, 0600, dbOptions)
db, err = bbolt.Open(dbFile, 0o0600, dbOptions)
}
if err != nil {
return nil, err
@ -89,7 +87,6 @@ func (b *BBolt) Get(key string) (record.Record, error) {
}
return nil
})
if err != nil {
return nil, err
}
@ -188,7 +185,7 @@ func (b *BBolt) Delete(key string) error {
func (b *BBolt) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
queryIter := iterator.New()
@ -235,19 +232,19 @@ func (b *BBolt) queryExecutor(queryIter *iterator.Iterator, q *query.Query, loca
duplicate := make([]byte, len(value))
copy(duplicate, value)
new, err := record.NewRawWrapper(b.name, iterWrapper.DatabaseKey(), duplicate)
newWrapper, err := record.NewRawWrapper(b.name, iterWrapper.DatabaseKey(), duplicate)
if err != nil {
return err
}
select {
case <-queryIter.Done:
return nil
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
default:
select {
case <-queryIter.Done:
return nil
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
case <-time.After(1 * time.Second):
return errors.New("query timeout")
}

View file

@ -1,9 +1,7 @@
//nolint:unparam,maligned
package bbolt
import (
"context"
"io/ioutil"
"os"
"reflect"
"sync"
@ -22,7 +20,7 @@ var (
_ storage.Purger = &BBolt{}
)
type TestRecord struct {
type TestRecord struct { //nolint:maligned
record.Base
sync.Mutex
S string
@ -42,11 +40,15 @@ type TestRecord struct {
}
func TestBBolt(t *testing.T) {
testDir, err := ioutil.TempDir("", "testing-")
t.Parallel()
testDir, err := os.MkdirTemp("", "testing-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(testDir) // clean up
defer func() {
_ = os.RemoveAll(testDir) // clean up
}()
// start
db, err := NewBBolt("test", testDir)

View file

@ -2,7 +2,7 @@ package storage
import "errors"
// Errors for storages
// Errors for storages.
var (
ErrNotFound = errors.New("storage entry not found")
)

View file

@ -8,7 +8,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io/fs"
"os"
"path/filepath"
"runtime"
@ -23,8 +23,8 @@ import (
)
const (
defaultFileMode = os.FileMode(int(0644))
defaultDirMode = os.FileMode(int(0755))
defaultFileMode = os.FileMode(0o0644)
defaultDirMode = os.FileMode(0o0755)
onWindows = runtime.GOOS == "windows"
)
@ -42,18 +42,18 @@ func init() {
func NewFSTree(name, location string) (storage.Interface, error) {
basePath, err := filepath.Abs(location)
if err != nil {
return nil, fmt.Errorf("fstree: failed to validate path %s: %s", location, err)
return nil, fmt.Errorf("fstree: failed to validate path %s: %w", location, err)
}
file, err := os.Stat(basePath)
if err != nil {
if os.IsNotExist(err) {
if errors.Is(err, fs.ErrNotExist) {
err = os.MkdirAll(basePath, defaultDirMode)
if err != nil {
return nil, fmt.Errorf("fstree: failed to create directory %s: %s", basePath, err)
return nil, fmt.Errorf("fstree: failed to create directory %s: %w", basePath, err)
}
} else {
return nil, fmt.Errorf("fstree: failed to stat path %s: %s", basePath, err)
return nil, fmt.Errorf("fstree: failed to stat path %s: %w", basePath, err)
}
} else {
if !file.IsDir() {
@ -88,12 +88,12 @@ func (fst *FSTree) Get(key string) (record.Record, error) {
return nil, err
}
data, err := ioutil.ReadFile(dstPath)
data, err := os.ReadFile(dstPath)
if err != nil {
if os.IsNotExist(err) {
if errors.Is(err, fs.ErrNotExist) {
return nil, storage.ErrNotFound
}
return nil, fmt.Errorf("fstree: failed to read file %s: %s", dstPath, err)
return nil, fmt.Errorf("fstree: failed to read file %s: %w", dstPath, err)
}
r, err := record.NewRawWrapper(fst.name, key, data)
@ -132,11 +132,11 @@ func (fst *FSTree) Put(r record.Record) (record.Record, error) {
// create dir and try again
err = os.MkdirAll(filepath.Dir(dstPath), defaultDirMode)
if err != nil {
return nil, fmt.Errorf("fstree: failed to create directory %s: %s", filepath.Dir(dstPath), err)
return nil, fmt.Errorf("fstree: failed to create directory %s: %w", filepath.Dir(dstPath), err)
}
err = writeFile(dstPath, data, defaultFileMode)
if err != nil {
return nil, fmt.Errorf("fstree: could not write file %s: %s", dstPath, err)
return nil, fmt.Errorf("fstree: could not write file %s: %w", dstPath, err)
}
}
@ -153,7 +153,7 @@ func (fst *FSTree) Delete(key string) error {
// remove entry
err = os.Remove(dstPath)
if err != nil {
return fmt.Errorf("fstree: could not delete %s: %s", dstPath, err)
return fmt.Errorf("fstree: could not delete %s: %w", dstPath, err)
}
return nil
@ -163,7 +163,7 @@ func (fst *FSTree) Delete(key string) error {
func (fst *FSTree) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
walkPrefix, err := fst.buildFilePath(q.DatabaseKeyPrefix(), false)
@ -177,10 +177,10 @@ func (fst *FSTree) Query(q *query.Query, local, internal bool) (*iterator.Iterat
walkRoot = walkPrefix
case err == nil:
walkRoot = filepath.Dir(walkPrefix)
case os.IsNotExist(err):
case errors.Is(err, fs.ErrNotExist):
walkRoot = filepath.Dir(walkPrefix)
default: // err != nil
return nil, fmt.Errorf("fstree: could not stat query root %s: %s", walkPrefix, err)
return nil, fmt.Errorf("fstree: could not stat query root %s: %w", walkPrefix, err)
}
queryIter := iterator.New()
@ -191,10 +191,8 @@ func (fst *FSTree) Query(q *query.Query, local, internal bool) (*iterator.Iterat
func (fst *FSTree) queryExecutor(walkRoot string, queryIter *iterator.Iterator, q *query.Query, local, internal bool) {
err := filepath.Walk(walkRoot, func(path string, info os.FileInfo, err error) error {
// check for error
if err != nil {
return fmt.Errorf("fstree: error in walking fs: %s", err)
return fmt.Errorf("fstree: error in walking fs: %w", err)
}
if info.IsDir() {
@ -212,22 +210,22 @@ func (fst *FSTree) queryExecutor(walkRoot string, queryIter *iterator.Iterator,
}
// read file
data, err := ioutil.ReadFile(path)
data, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return fmt.Errorf("fstree: failed to read file %s: %s", path, err)
return fmt.Errorf("fstree: failed to read file %s: %w", path, err)
}
// parse
key, err := filepath.Rel(fst.basePath, path)
if err != nil {
return fmt.Errorf("fstree: failed to extract key from filepath %s: %s", path, err)
return fmt.Errorf("fstree: failed to extract key from filepath %s: %w", path, err)
}
r, err := record.NewRawWrapper(fst.name, key, data)
if err != nil {
return fmt.Errorf("fstree: failed to load file %s: %s", path, err)
return fmt.Errorf("fstree: failed to load file %s: %w", path, err)
}
if !r.Meta().CheckValidity() {
@ -277,7 +275,7 @@ func (fst *FSTree) Shutdown() error {
return nil
}
// writeFile mirrors ioutil.WriteFile, replacing an existing file with the same
// writeFile mirrors os.WriteFile, replacing an existing file with the same
// name atomically. This is not atomic on Windows, but still an improvement.
// TODO: Replace with github.com/google/renamio.WriteFile as soon as it is fixed on Windows.
// TODO: This has become a wont-fix. Explore other options.
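The writeFile implementation itself is not shown in this diff; below is a hedged sketch of the write-to-temp-then-rename pattern the comment describes, with the same caveat that the rename is not atomic on Windows.
// Sketch of an atomic(ish) file write: write to a temp file, then rename.
// On POSIX the rename replaces the target atomically; on Windows it is best effort.
func writeFileAtomic(path string, data []byte, perm os.FileMode) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, data, perm); err != nil {
		return fmt.Errorf("fstree: failed to write temp file %s: %w", tmp, err)
	}
	return os.Rename(tmp, path)
}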

View file

@ -2,7 +2,5 @@ package fstree
import "github.com/safing/portbase/database/storage"
var (
// Compile time interface checks.
_ storage.Interface = &FSTree{}
)
// Compile time interface checks.
var _ storage.Interface = &FSTree{}

View file

@ -113,7 +113,7 @@ func (hm *HashMap) Delete(key string) error {
func (hm *HashMap) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
queryIter := iterator.New()

View file

@ -1,4 +1,3 @@
//nolint:unparam,maligned
package hashmap
import (
@ -6,10 +5,9 @@ import (
"sync"
"testing"
"github.com/safing/portbase/database/storage"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/database/storage"
)
var (
@ -18,7 +16,7 @@ var (
_ storage.Batcher = &HashMap{}
)
type TestRecord struct {
type TestRecord struct { //nolint:maligned
record.Base
sync.Mutex
S string
@ -38,6 +36,8 @@ type TestRecord struct {
}
func TestHashMap(t *testing.T) {
t.Parallel()
// start
db, err := NewHashMap("test", "")
if err != nil {

View file

@ -10,15 +10,13 @@ import (
"github.com/safing/portbase/database/record"
)
var (
// ErrNotImplemented is returned when a function is not implemented by a storage.
ErrNotImplemented = errors.New("not implemented")
)
// ErrNotImplemented is returned when a function is not implemented by a storage.
var ErrNotImplemented = errors.New("not implemented")
// InjectBase is a dummy base structure to reduce boilerplate code for injected storage interfaces.
type InjectBase struct{}
// Compile time interface check
// Compile time interface check.
var _ Interface = &InjectBase{}
// Get returns a database record.

View file
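To illustrate the intent of InjectBase, here is a hedged sketch of a storage that embeds it and overrides only Get; all other methods would fall back to the ErrNotImplemented defaults, assuming InjectBase stubs the full storage Interface as the compile-time check above suggests.
// Sketch: a minimal injected storage that only supports Get.
type memStorage struct {
	InjectBase // stubs the rest of the storage Interface (assumed)
	records    map[string]record.Record
}

func (s *memStorage) Get(key string) (record.Record, error) {
	r, ok := s.records[key]
	if !ok {
		return nil, ErrNotFound
	}
	return r, nil
}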

@ -26,7 +26,7 @@ type Interface interface {
MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error
}
// Maintainer defines the database storage API for backends that support optimized fetching of only the metadata.
// MetaHandler defines the database storage API for backends that support optimized fetching of only the metadata.
type MetaHandler interface {
GetMeta(key string) (*record.Meta, error)
}

View file

@ -17,7 +17,7 @@ type Sinkhole struct {
}
var (
// Compile time interface check
// Compile time interface checks.
_ storage.Interface = &Sinkhole{}
_ storage.Maintainer = &Sinkhole{}
_ storage.Batcher = &Sinkhole{}
@ -62,7 +62,7 @@ func (s *Sinkhole) PutMany(shadowDelete bool) (chan<- record.Record, <-chan erro
// start handler
go func() {
for range batch {
// nom, nom, nom
// discard everything
}
errs <- nil
}()

Some files were not shown because too many files have changed in this diff.