Initial commit

This commit is contained in:
Daniel 2024-06-24 09:01:16 +02:00
commit d2ef3a4fde
31 changed files with 3076 additions and 0 deletions

6
.gitattributes vendored Normal file
View file

@ -0,0 +1,6 @@
# Treat all Go files in this repo as binary, with no git magic updating
# line endings. Windows users contributing to Go will need to use a
# modern version of git and editors capable of LF line endings.
*.go -text diff=golang

11
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"

55
.github/workflows/go.yml vendored Normal file
View file

@ -0,0 +1,55 @@
name: Go
on:
push:
branches:
- master
- develop
pull_request:
branches:
- master
- develop
jobs:
lint:
name: Linter
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '^1.21'
- name: Get dependencies
run: go mod download
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v3
with:
version: v1.52.2
only-new-issues: true
args: -c ./.golangci.yml --timeout 15m
- name: Run go vet
run: go vet ./...
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '^1.21'
- name: Get dependencies
run: go mod download
- name: Run tests
run: ./test --test-only

View file

@ -0,0 +1,26 @@
# This workflow responds to first time posters with a greeting message.
# Docs: https://github.com/actions/first-interaction
name: Greet New Users
# This workflow is triggered when a new issue is created.
on:
issues:
types: opened
permissions:
contents: read
issues: write
jobs:
greet:
runs-on: ubuntu-latest
steps:
- uses: actions/first-interaction@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# Respond to first time issue raisers.
issue-message: |
Greetings and welcome to our community! As this is the first issue you opened here, we wanted to share some useful infos with you:
- 🗣️ Our community on [Discord](https://discord.gg/safing) is super helpful and active. We also have an AI-enabled support bot that knows Portmaster well and can give you immediate help.
- 📖 The [Wiki](https://wiki.safing.io/) answers all common questions and has many important details. If you can't find an answer there, let us know, so we can add anything that's missing.

View file

@ -0,0 +1,22 @@
# This workflow responds with a message when certain labels are added to an issue or PR.
# Docs: https://github.com/dessant/label-actions
name: Label Actions
# This workflow is triggered when a label is added to an issue.
on:
issues:
types: labeled
permissions:
contents: read
issues: write
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/label-actions@v3
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
config-path: ".github/label-actions.yml"
process-only: "issues"

42
.github/workflows/issues-stale.yml vendored Normal file
View file

@ -0,0 +1,42 @@
# This workflow warns and then closes stale issues and PRs.
# Docs: https://github.com/actions/stale
name: Close Stale Issues
on:
schedule:
- cron: "17 5 * * 1-5" # run at 5:17 (UTC) on Monday to Friday
workflow_dispatch:
permissions:
contents: read
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# Increase max operations.
# When using GITHUB_TOKEN, the rate limit is 1,000 requests per hour per repository.
operations-per-run: 500
# Handle stale issues
stale-issue-label: 'stale'
# Exemptions
exempt-all-issue-assignees: true
exempt-issue-labels: 'support,dependencies,pinned,security'
# Mark as stale
days-before-issue-stale: 63 # 2 months / 9 weeks
stale-issue-message: |
This issue has been automatically marked as inactive because it has not had activity in the past two months.
If no further activity occurs, this issue will be automatically closed in one week in order to increase our focus on active topics.
# Close
days-before-issue-close: 7 # 1 week
close-issue-message: |
This issue has been automatically closed because it has not had recent activity. Thank you for your contributions.
If the issue has not been resolved, you can [find more information in our Wiki](https://wiki.safing.io/) or [continue the conversation on our Discord](https://discord.gg/safing).
# TODO: Handle stale PRs
days-before-pr-stale: 36500 # 100 years - effectively disabled.

4
.gitignore vendored Normal file
View file

@ -0,0 +1,4 @@
go.mod.*
vendor
go.work
go.work.sum

72
.golangci.yml Normal file
View file

@ -0,0 +1,72 @@
# Docs:
# https://golangci-lint.run/usage/linters/
linters:
enable-all: true
disable:
- containedctx
- contextcheck
- cyclop
- depguard
- exhaustivestruct
- exhaustruct
- forbidigo
- funlen
- gochecknoglobals
- gochecknoinits
- gocognit
- gocyclo
- goerr113
- gomnd
- ifshort
- interfacebloat
- interfacer
- ireturn
- lll
- musttag
- nestif
- nilnil
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosnakecase
- revive
- tagliatelle
- testpackage
- varnamelen
- whitespace
- wrapcheck
- wsl
linters-settings:
revive:
# See https://github.com/mgechev/revive#available-rules for details.
enable-all-rules: true
gci:
# put imports beginning with prefix after 3rd-party packages;
# only support one prefix
# if not set, use goimports.local-prefixes
local-prefixes: github.com/safing
godox:
# report any comments starting with keywords, this is useful for TODO or FIXME comments that
# might be left in the code accidentally and should be resolved before merging
keywords:
- FIXME
gosec:
# To specify a set of rules to explicitly exclude.
# Available rules: https://github.com/securego/gosec#available-rules
excludes:
- G204 # Variables in commands.
- G304 # Variables in file paths.
- G505 # We need crypto/sha1 for non-security stuff. Using `nolint:` triggers another linter.
issues:
exclude-use-default: false
exclude-rules:
- text: "a blank import .*"
linters:
- golint
- text: "ST1000: at least one file in a package should have a package comment.*"
linters:
- stylecheck

1
AUTHORS Normal file
View file

@ -0,0 +1 @@
All files in this repository (unless otherwise noted) are authored, owned and copyrighted by Safing ICS Technologies GmbH (Austria).

76
CODE_OF_CONDUCT.md Normal file
View file

@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at coc@safing.io. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

29
LICENSE Normal file
View file

@ -0,0 +1,29 @@
Copyright (c) 2024 Safing ICS Technologies GmbH. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

16
README.md Normal file
View file

@ -0,0 +1,16 @@
# Go Structures
A small collection of useful data structures and helpers.
## container
A []byte slice on steroids that helps to reduce reallocations.
## dsd
DSD stands for dynamically structured data. It has an identifier for the format used, so file and wire encoding can be simply switched.
This makes it easier / more efficient to store different data types in a k/v data storage.
## varint
This is just a convenience wrapper around `encoding/binary`, because we use varints a lot.

5
TRADEMARKS Normal file
View file

@ -0,0 +1,5 @@
The names "Safing", "Portmaster", "SPN" and their logos are trademarks owned by Safing ICS Technologies GmbH (Austria).
Although our code is free, it is very important that we strictly enforce our trademark rights, in order to be able to protect our users against people who use the marks to commit fraud. This means that, while you have considerable freedom to redistribute and modify our software, there are tight restrictions on your ability to use our names and logos in ways which fall in the domain of trademark law, even when built into binaries that we provide.
This file is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. Parts of it were taken from https://www.mozilla.org/en-US/foundation/licensing/.

368
container/container.go Normal file
View file

@ -0,0 +1,368 @@
package container
import (
"errors"
"io"
"github.com/safing/portbase/formats/varint"
)
// Container is a []byte slice on steroids, allowing for quick data appending, prepending and fetching.
type Container struct {
	compartments [][]byte // data chunks, logically concatenated in order
	offset       int      // index of the first unconsumed compartment
	err          error    // transported error, if any (see package doc)
}
// Data Handling

// NewContainer creates a new container with an optional initial []byte slice. Data will NOT be copied.
//
// Deprecated: use New instead, it's the same thing.
func NewContainer(data ...[]byte) *Container {
	return &Container{
		compartments: data,
	}
}

// New creates a new container with an optional initial []byte slice. Data will NOT be copied.
func New(data ...[]byte) *Container {
	return &Container{
		compartments: data,
	}
}
// Prepend prepends data. Data will NOT be copied.
func (c *Container) Prepend(data []byte) {
	// Make room at the front if no free compartment slots are left.
	if c.offset < 1 {
		c.renewCompartments()
	}
	c.offset--
	c.compartments[c.offset] = data
}

// Append appends the given data. Data will NOT be copied.
func (c *Container) Append(data []byte) {
	c.compartments = append(c.compartments, data)
}

// PrependNumber prepends a number (varint encoded).
func (c *Container) PrependNumber(n uint64) {
	c.Prepend(varint.Pack64(n))
}

// AppendNumber appends a number (varint encoded).
func (c *Container) AppendNumber(n uint64) {
	c.compartments = append(c.compartments, varint.Pack64(n))
}

// PrependInt prepends an int (varint encoded).
func (c *Container) PrependInt(n int) {
	c.Prepend(varint.Pack64(uint64(n)))
}

// AppendInt appends an int (varint encoded).
func (c *Container) AppendInt(n int) {
	c.compartments = append(c.compartments, varint.Pack64(uint64(n)))
}
// AppendAsBlock appends the length of the data and the data itself. Data will NOT be copied.
func (c *Container) AppendAsBlock(data []byte) {
	c.AppendNumber(uint64(len(data)))
	c.Append(data)
}

// PrependAsBlock prepends the length of the data and the data itself. Data will NOT be copied.
func (c *Container) PrependAsBlock(data []byte) {
	// Prepend the data first, then its length, so the length ends up in front.
	c.Prepend(data)
	c.PrependNumber(uint64(len(data)))
}

// AppendContainer appends another Container. Data will NOT be copied.
func (c *Container) AppendContainer(data *Container) {
	c.compartments = append(c.compartments, data.compartments...)
}

// AppendContainerAsBlock appends another Container (length and data). Data will NOT be copied.
func (c *Container) AppendContainerAsBlock(data *Container) {
	c.AppendNumber(uint64(data.Length()))
	c.compartments = append(c.compartments, data.compartments...)
}
// HoldsData returns true if the Container holds any data.
func (c *Container) HoldsData() bool {
	for _, compartment := range c.compartments[c.offset:] {
		if len(compartment) > 0 {
			return true
		}
	}
	return false
}

// Length returns the full length of all bytes held by the container.
func (c *Container) Length() (length int) {
	for _, compartment := range c.compartments[c.offset:] {
		length += len(compartment)
	}
	return length
}
// Replace replaces all held data with a new data slice. Data will NOT be copied.
func (c *Container) Replace(data []byte) {
	c.compartments = [][]byte{data}
	// Reset the read offset: without this, a container whose previous data was
	// partially consumed (offset > 0) would skip the new data at index 0.
	c.offset = 0
}
// CompileData concatenates all bytes held by the container and returns it as one single []byte slice. Data will NOT be copied and is NOT consumed.
// NOTE(review): data IS copied (once) when more than one compartment is held;
// the merged buffer then becomes the single remaining compartment.
func (c *Container) CompileData() []byte {
	if len(c.compartments) != 1 {
		// Merge all unconsumed compartments into one new buffer.
		newBuf := make([]byte, c.Length())
		copyBuf := newBuf
		for i := c.offset; i < len(c.compartments); i++ {
			copy(copyBuf, c.compartments[i])
			copyBuf = copyBuf[len(c.compartments[i]):]
		}
		c.compartments = [][]byte{newBuf}
		c.offset = 0
	}
	return c.compartments[0]
}
// Get returns the given amount of bytes. Data MAY be copied and IS consumed.
// Returns an error if less than n bytes are held.
func (c *Container) Get(n int) ([]byte, error) {
	buf := c.Peek(n)
	if len(buf) < n {
		return nil, errors.New("container: not enough data to return")
	}
	c.skip(len(buf))
	return buf, nil
}

// GetAll returns all data. Data MAY be copied and IS consumed.
func (c *Container) GetAll() []byte {
	// TODO: Improve.
	buf := c.Peek(c.Length())
	c.skip(len(buf))
	return buf
}

// GetAsContainer returns the given amount of bytes in a new container. Data will NOT be copied and IS consumed.
func (c *Container) GetAsContainer(n int) (*Container, error) {
	newC := c.PeekContainer(n)
	if newC == nil {
		return nil, errors.New("container: not enough data to return")
	}
	c.skip(n)
	return newC, nil
}

// GetMax returns as much as possible, but the given amount of bytes at maximum. Data MAY be copied and IS consumed.
func (c *Container) GetMax(n int) []byte {
	buf := c.Peek(n)
	c.skip(len(buf))
	return buf
}
// WriteToSlice copies data to the give slice until it is full, or the container is empty. It returns the bytes written and if the container is now empty. Data IS copied and IS consumed.
func (c *Container) WriteToSlice(slice []byte) (n int, containerEmptied bool) {
	for i := c.offset; i < len(c.compartments); i++ {
		copy(slice, c.compartments[i])
		if len(slice) < len(c.compartments[i]) {
			// Only part of this compartment fit into the slice: consume the
			// copied prefix and keep the remainder.
			n += len(slice)
			c.compartments[i] = c.compartments[i][len(slice):]
			c.checkOffset()
			return n, false
		}
		// The whole compartment was copied; release it and advance the offset.
		n += len(c.compartments[i])
		slice = slice[len(c.compartments[i]):]
		c.compartments[i] = nil
		c.offset = i + 1
	}
	c.checkOffset()
	return n, true
}
// WriteAllTo writes all the data to the given io.Writer. Data IS NOT copied (but may be by writer) and IS NOT consumed.
func (c *Container) WriteAllTo(writer io.Writer) error {
	for _, compartment := range c.compartments[c.offset:] {
		// Keep writing until this compartment is fully flushed, as a writer
		// may accept fewer bytes than offered.
		remaining := compartment
		for len(remaining) > 0 {
			n, err := writer.Write(remaining)
			if err != nil {
				return err
			}
			remaining = remaining[n:]
		}
	}
	return nil
}
// clean renews the compartment slice once many leading slots have been
// consumed, so the unused front of the backing array can be reclaimed.
func (c *Container) clean() {
	if c.offset > 100 {
		c.renewCompartments()
	}
}

// renewCompartments copies the unconsumed compartments into a fresh slice,
// leaving free slots at the front for future prepends.
func (c *Container) renewCompartments() {
	baseLength := len(c.compartments) - c.offset + 5
	newCompartments := make([][]byte, baseLength, baseLength+5)
	copy(newCompartments[5:], c.compartments[c.offset:])
	c.compartments = newCompartments
	// Data now starts at index 5; offset 4 leaves one nil compartment (which
	// is harmless for reads) plus four free slots for prepending.
	c.offset = 4
}

// carbonCopy returns a shallow copy of the container: the compartment slice
// itself is duplicated, but the held byte slices are shared.
func (c *Container) carbonCopy() *Container {
	newC := &Container{
		compartments: make([][]byte, len(c.compartments)),
		offset:       c.offset,
		err:          c.err,
	}
	copy(newC.compartments, c.compartments)
	return newC
}

// checkOffset re-centers the offset when all compartments have been consumed,
// keeping room at the front for future prepends.
func (c *Container) checkOffset() {
	if c.offset >= len(c.compartments) {
		c.offset = len(c.compartments) / 2
	}
}
// Block Handling

// PrependLength prepends the current full length of all bytes in the container.
func (c *Container) PrependLength() {
	c.Prepend(varint.Pack64(uint64(c.Length())))
}
// Peek returns the given amount of bytes. Data MAY be copied and IS NOT consumed.
// Returns nil when the requested amount is not positive or no data is held.
func (c *Container) Peek(n int) []byte {
	// Check requested length.
	if n <= 0 {
		return nil
	}

	// Guard against an empty container: indexing c.compartments[c.offset]
	// below would otherwise panic with an index out of range.
	if c.offset >= len(c.compartments) {
		return nil
	}

	// Check if the first slice holds enough data.
	if len(c.compartments[c.offset]) >= n {
		return c.compartments[c.offset][:n]
	}

	// Start gathering data.
	slice := make([]byte, n)
	copySlice := slice
	n = 0
	for i := c.offset; i < len(c.compartments); i++ {
		copy(copySlice, c.compartments[i])
		if len(copySlice) <= len(c.compartments[i]) {
			// This compartment filled the rest of the request.
			n += len(copySlice)
			return slice[:n]
		}
		n += len(c.compartments[i])
		copySlice = copySlice[len(c.compartments[i]):]
	}
	// Not enough data held; return what was gathered.
	return slice[:n]
}
// PeekContainer returns the given amount of bytes in a new container. Data will NOT be copied and IS NOT consumed.
// Returns nil if n is negative or more than the held data.
func (c *Container) PeekContainer(n int) (newC *Container) {
	// Check requested length.
	if n < 0 {
		return nil
	} else if n == 0 {
		return &Container{}
	}

	newC = &Container{}
	for i := c.offset; i < len(c.compartments); i++ {
		if n >= len(c.compartments[i]) {
			// Take the whole compartment.
			newC.compartments = append(newC.compartments, c.compartments[i])
			n -= len(c.compartments[i])
		} else {
			// Take only the needed prefix of this compartment.
			// NOTE(review): once n reaches 0, later iterations still append
			// empty slices — harmless, but the loop could break out early.
			newC.compartments = append(newC.compartments, c.compartments[i][:n])
			n = 0
		}
	}
	// Not enough data was held to satisfy the request.
	if n > 0 {
		return nil
	}
	return newC
}
// skip consumes n bytes from the front of the container, releasing fully
// consumed compartments and trimming a partially consumed one.
func (c *Container) skip(n int) {
	for i := c.offset; i < len(c.compartments); i++ {
		if len(c.compartments[i]) <= n {
			// Whole compartment is consumed; release it.
			n -= len(c.compartments[i])
			c.offset = i + 1
			c.compartments[i] = nil
			if n == 0 {
				c.checkOffset()
				return
			}
		} else {
			// Only part of this compartment is consumed; keep the rest.
			c.compartments[i] = c.compartments[i][n:]
			c.checkOffset()
			return
		}
	}
	c.checkOffset()
}
// GetNextBlock returns the next block of data defined by a varint. Data MAY be copied and IS consumed.
func (c *Container) GetNextBlock() ([]byte, error) {
	blockSize, err := c.GetNextN64()
	if err != nil {
		return nil, err
	}
	return c.Get(int(blockSize))
}

// GetNextBlockAsContainer returns the next block of data as a Container defined by a varint. Data will NOT be copied and IS consumed.
func (c *Container) GetNextBlockAsContainer() (*Container, error) {
	blockSize, err := c.GetNextN64()
	if err != nil {
		return nil, err
	}
	return c.GetAsContainer(int(blockSize))
}
// GetNextN8 parses and returns a varint of type uint8.
func (c *Container) GetNextN8() (uint8, error) {
	// Peek 2 bytes: the maximum varint encoding length of a uint8.
	buf := c.Peek(2)
	num, n, err := varint.Unpack8(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

// GetNextN16 parses and returns a varint of type uint16.
func (c *Container) GetNextN16() (uint16, error) {
	// Peek 3 bytes: the maximum varint encoding length of a uint16.
	buf := c.Peek(3)
	num, n, err := varint.Unpack16(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

// GetNextN32 parses and returns a varint of type uint32.
func (c *Container) GetNextN32() (uint32, error) {
	// Peek 5 bytes: the maximum varint encoding length of a uint32.
	buf := c.Peek(5)
	num, n, err := varint.Unpack32(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

// GetNextN64 parses and returns a varint of type uint64.
func (c *Container) GetNextN64() (uint64, error) {
	// Peek 10 bytes: the maximum varint encoding length of a uint64.
	buf := c.Peek(10)
	num, n, err := varint.Unpack64(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

208
container/container_test.go Normal file
View file

@ -0,0 +1,208 @@
package container
import (
"bytes"
"testing"
"github.com/safing/portbase/utils"
)
var (
	// testData is the reference payload used across all container tests.
	testData = []byte("The quick brown fox jumps over the lazy dog")
	// testDataSplitted is testData split into chunks of increasing size.
	testDataSplitted = [][]byte{
		[]byte("T"),
		[]byte("he"),
		[]byte(" qu"),
		[]byte("ick "),
		[]byte("brown"),
		[]byte(" fox j"),
		[]byte("umps ov"),
		[]byte("er the l"),
		[]byte("azy dog"),
	}
)
// TestContainerDataHandling builds containers through many different
// append/prepend/copy paths and checks they all compile to the same data.
func TestContainerDataHandling(t *testing.T) {
	t.Parallel()

	// c1: single-compartment container; c1c: working copy to consume.
	c1 := New(utils.DuplicateBytes(testData))
	c1c := c1.carbonCopy()

	// c2: rebuilt byte-by-byte via WriteToSlice + Append.
	c2 := New()
	for i := 0; i < len(testData); i++ {
		oneByte := make([]byte, 1)
		c1c.WriteToSlice(oneByte)
		c2.Append(oneByte)
	}
	c2c := c2.carbonCopy()

	// c3: rebuilt by prepending c2's compartments in reverse order.
	c3 := New()
	for i := len(c2c.compartments) - 1; i >= c2c.offset; i-- {
		c3.Prepend(c2c.compartments[i])
	}
	c3c := c3.carbonCopy()

	// d4: drained in one oversized WriteToSlice call.
	d4 := make([]byte, len(testData)*2)
	n, _ := c3c.WriteToSlice(d4)
	d4 = d4[:n]
	c3c = c3.carbonCopy()

	// d5: drained one byte at a time.
	d5 := make([]byte, len(testData))
	for i := 0; i < len(testData); i++ {
		c3c.WriteToSlice(d5[i : i+1])
	}

	// c6: built via Replace.
	c6 := New()
	c6.Replace(testData)

	// c7: built by appending pre-split chunks.
	c7 := New(testDataSplitted[0])
	for i := 1; i < len(testDataSplitted); i++ {
		c7.Append(testDataSplitted[i])
	}

	// c8: many nil prepends to trigger compartment renewal via clean().
	c8 := New(testDataSplitted...)
	for i := 0; i < 110; i++ {
		c8.Prepend(nil)
	}
	c8.clean()

	// c9/c10: peeked sub-containers.
	c9 := c8.PeekContainer(len(testData))

	c10 := c9.PeekContainer(len(testData) - 1)
	c10.Append(testData[len(testData)-1:])

	compareMany(t, testData, c1.CompileData(), c2.CompileData(), c3.CompileData(), d4, d5, c6.CompileData(), c7.CompileData(), c8.CompileData(), c9.CompileData(), c10.CompileData())
}
// compareMany checks every given sample against the reference bytes and
// reports each mismatch with its 1-based sample number.
func compareMany(t *testing.T, reference []byte, other ...[]byte) {
	t.Helper()

	for i, sample := range other {
		if bytes.Equal(reference, sample) {
			continue
		}
		t.Errorf("sample %d does not match reference: sample is '%s'", i+1, string(sample))
	}
}
// TestDataFetching checks GetMax and that over-sized Get requests fail.
func TestDataFetching(t *testing.T) {
	t.Parallel()

	c1 := New(utils.DuplicateBytes(testData))
	data := c1.GetMax(1)
	if string(data[0]) != "T" {
		t.Errorf("failed to GetMax(1), got %s, expected %s", string(data), "T")
	}

	// Requests beyond the held data must return an error.
	_, err := c1.Get(1000)
	if err == nil {
		t.Error("should fail")
	}

	_, err = c1.GetAsContainer(1000)
	if err == nil {
		t.Error("should fail")
	}
}
// TestBlocks checks that a prepended length (43 = len(testData)) can be read
// back through every GetNextN* width.
func TestBlocks(t *testing.T) {
	t.Parallel()

	c1 := New(utils.DuplicateBytes(testData))

	c1.PrependLength()
	n, err := c1.GetNextN8()
	if err != nil {
		t.Errorf("GetNextN8() failed: %s", err)
	}
	if n != 43 {
		t.Errorf("n should be 43, was %d", n)
	}

	c1.PrependLength()
	n2, err := c1.GetNextN16()
	if err != nil {
		t.Errorf("GetNextN16() failed: %s", err)
	}
	if n2 != 43 {
		t.Errorf("n should be 43, was %d", n2)
	}

	c1.PrependLength()
	n3, err := c1.GetNextN32()
	if err != nil {
		t.Errorf("GetNextN32() failed: %s", err)
	}
	if n3 != 43 {
		t.Errorf("n should be 43, was %d", n3)
	}

	c1.PrependLength()
	n4, err := c1.GetNextN64()
	if err != nil {
		t.Errorf("GetNextN64() failed: %s", err)
	}
	if n4 != 43 {
		t.Errorf("n should be 43, was %d", n4)
	}
}
// TestContainerBlockHandling checks that length-prefixed blocks survive
// several container construction paths and decode back to testData.
func TestContainerBlockHandling(t *testing.T) {
	t.Parallel()

	// c1 holds two blocks: one via PrependLength, one via AppendAsBlock.
	c1 := New(utils.DuplicateBytes(testData))
	c1.PrependLength()
	c1.AppendAsBlock(testData)
	c1c := c1.carbonCopy()

	// c2: same content, rebuilt byte-by-byte.
	c2 := New(nil)
	for i := 0; i < c1.Length(); i++ {
		oneByte := make([]byte, 1)
		c1c.WriteToSlice(oneByte)
		c2.Append(oneByte)
	}

	// c3: one block assembled from pre-split chunks.
	c3 := New(testDataSplitted[0])
	for i := 1; i < len(testDataSplitted); i++ {
		c3.Append(testDataSplitted[i])
	}
	c3.PrependLength()

	d1, err := c1.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d2, err := c1.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d3, err := c2.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d4, err := c2.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d5, err := c3.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}

	compareMany(t, testData, d1, d2, d3, d4, d5)
}
// TestContainerMisc checks that an empty container compiles to no data.
func TestContainerMisc(t *testing.T) {
	t.Parallel()

	c1 := New()
	d1 := c1.CompileData()
	if len(d1) > 0 {
		t.Fatalf("empty container should not hold any data")
	}
}

// TestDeprecated makes sure the deprecated NewContainer constructor still works.
func TestDeprecated(t *testing.T) {
	t.Parallel()

	NewContainer(utils.DuplicateBytes(testData))
}

26
container/doc.go Normal file
View file

@ -0,0 +1,26 @@
// Package container gives you a []byte slice on steroids, allowing for quick data appending, prepending and fetching as well as transparent error transportation.
//
// A Container is basically a [][]byte slice that just appends new []byte slices and only copies things around when necessary.
//
// Byte slices added to the Container are not changed or appended, to not corrupt any other data that may be before and after the given slice.
// If interested, consider the following example to understand why this is important:
//
// package main
//
// import (
// "fmt"
// )
//
// func main() {
// a := []byte{0, 1,2,3,4,5,6,7,8,9}
// fmt.Printf("a: %+v\n", a)
// fmt.Printf("\nmaking changes...\n(we are not changing a directly)\n\n")
// b := a[2:6]
// c := append(b, 10, 11)
// fmt.Printf("b: %+v\n", b)
// fmt.Printf("c: %+v\n", c)
// fmt.Printf("a: %+v\n", a)
// }
//
// run it here: https://play.golang.org/p/xu1BXT3QYeE
package container

View file

@ -0,0 +1,21 @@
package container
import (
"encoding/json"
)
// MarshalJSON serializes the container as a JSON byte array.
func (c *Container) MarshalJSON() ([]byte, error) {
	// Compile all compartments into one slice and let encoding/json
	// represent it as a base64 string, as it does for any []byte.
	compiled := c.CompileData()
	return json.Marshal(compiled)
}
// UnmarshalJSON unserializes a container from a JSON byte array.
func (c *Container) UnmarshalJSON(data []byte) error {
	var raw []byte
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}

	c.compartments = [][]byte{raw}
	// Reset the read offset: without this, unmarshaling into a container whose
	// previous data was partially consumed (offset > 0) would skip the new
	// data at index 0.
	c.offset = 0
	return nil
}

103
dsd/compression.go Normal file
View file

@ -0,0 +1,103 @@
package dsd
import (
"bytes"
"compress/gzip"
"errors"
"github.com/safing/portbase/formats/varint"
)
// DumpAndCompress stores the interface as a dsd formatted data structure and compresses the resulting data.
// The output starts with a varint-encoded compression format identifier,
// followed by the compressed dsd blob.
func DumpAndCompress(t interface{}, format uint8, compression uint8) ([]byte, error) {
	// Check if compression format is valid.
	compression, ok := ValidateCompressionFormat(compression)
	if !ok {
		return nil, ErrIncompatibleFormat
	}

	// Dump the given data with the given format.
	data, err := Dump(t, format)
	if err != nil {
		return nil, err
	}

	// prepare writer: the compression format identifier goes first.
	packetFormat := varint.Pack8(compression)
	buf := bytes.NewBuffer(nil)
	buf.Write(packetFormat)

	// compress
	switch compression {
	case GZIP:
		// create gzip writer
		gzipWriter, err := gzip.NewWriterLevel(buf, gzip.BestCompression)
		if err != nil {
			return nil, err
		}

		// write data
		n, err := gzipWriter.Write(data)
		if err != nil {
			return nil, err
		}
		if n != len(data) {
			return nil, errors.New("failed to fully write to gzip compressor")
		}

		// flush and write gzip footer
		err = gzipWriter.Close()
		if err != nil {
			return nil, err
		}
	default:
		return nil, ErrIncompatibleFormat
	}

	return buf.Bytes(), nil
}
// DecompressAndLoad decompresses the data using the specified compression format and then loads the resulting data blob into the interface.
// The decompressed blob is expected to start with a varint-encoded
// serialization format identifier, which is also returned.
func DecompressAndLoad(data []byte, compression uint8, t interface{}) (format uint8, err error) {
	// Check if compression format is valid.
	_, ok := ValidateCompressionFormat(compression)
	if !ok {
		return 0, ErrIncompatibleFormat
	}

	// prepare buffer for the decompressed output
	buf := bytes.NewBuffer(nil)

	// decompress
	switch compression {
	case GZIP:
		// create gzip reader
		gzipReader, err := gzip.NewReader(bytes.NewBuffer(data))
		if err != nil {
			return 0, err
		}

		// read uncompressed data
		_, err = buf.ReadFrom(gzipReader)
		if err != nil {
			return 0, err
		}

		// close the reader; reading to EOF above already verified the checksum
		err = gzipReader.Close()
		if err != nil {
			return 0, err
		}
	default:
		return 0, ErrIncompatibleFormat
	}

	// assign decompressed data
	data = buf.Bytes()

	format, read, err := loadFormat(data)
	if err != nil {
		return 0, err
	}
	return format, LoadAsFormat(data[read:], format, t)
}

160
dsd/dsd.go Normal file
View file

@ -0,0 +1,160 @@
package dsd
// dynamic structured data
// check here for some benchmarks: https://github.com/alecthomas/go_serialization_benchmarks
import (
"encoding/json"
"errors"
"fmt"
"io"
"github.com/fxamacker/cbor/v2"
"github.com/ghodss/yaml"
"github.com/vmihailenco/msgpack/v5"
"github.com/safing/portbase/formats/varint"
"github.com/safing/portbase/utils"
)
// Load loads an dsd structured data blob into the given interface.
// The blob starts with a varint-encoded format identifier; if it is not a
// known serialization format, it is treated as a compression format.
func Load(data []byte, t interface{}) (format uint8, err error) {
	format, read, err := loadFormat(data)
	if err != nil {
		return 0, err
	}

	_, ok := ValidateSerializationFormat(format)
	if ok {
		return format, LoadAsFormat(data[read:], format, t)
	}
	// Not a serialization format; assume compression and decompress first.
	return DecompressAndLoad(data[read:], format, t)
}
// LoadAsFormat loads a data blob into the interface using the specified format.
// The data must not include the format identifier prefix.
func LoadAsFormat(data []byte, format uint8, t interface{}) (err error) {
	switch format {
	case RAW:
		// Raw data cannot be unmarshaled into an arbitrary interface.
		return ErrIsRaw
	case JSON:
		err = json.Unmarshal(data, t)
		if err != nil {
			return fmt.Errorf("dsd: failed to unpack json: %w, data: %s", err, utils.SafeFirst16Bytes(data))
		}
		return nil
	case YAML:
		err = yaml.Unmarshal(data, t)
		if err != nil {
			return fmt.Errorf("dsd: failed to unpack yaml: %w, data: %s", err, utils.SafeFirst16Bytes(data))
		}
		return nil
	case CBOR:
		err = cbor.Unmarshal(data, t)
		if err != nil {
			return fmt.Errorf("dsd: failed to unpack cbor: %w, data: %s", err, utils.SafeFirst16Bytes(data))
		}
		return nil
	case MsgPack:
		err = msgpack.Unmarshal(data, t)
		if err != nil {
			return fmt.Errorf("dsd: failed to unpack msgpack: %w, data: %s", err, utils.SafeFirst16Bytes(data))
		}
		return nil
	case GenCode:
		// GenCode requires the target to implement GenCodeCompatible.
		genCodeStruct, ok := t.(GenCodeCompatible)
		if !ok {
			return errors.New("dsd: gencode is not supported by the given data structure")
		}
		_, err = genCodeStruct.GenCodeUnmarshal(data)
		if err != nil {
			return fmt.Errorf("dsd: failed to unpack gencode: %w, data: %s", err, utils.SafeFirst16Bytes(data))
		}
		return nil
	default:
		return ErrIncompatibleFormat
	}
}
// loadFormat reads the varint-encoded dsd format identifier from the start of
// data and returns it together with the number of bytes it occupied.
// It returns io.ErrUnexpectedEOF when no payload bytes follow the identifier.
func loadFormat(data []byte) (format uint8, read int, err error) {
	f, n, err := varint.Unpack8(data)
	if err != nil {
		return 0, 0, err
	}
	if n >= len(data) {
		// An identifier without any payload is not a valid dsd blob.
		return 0, 0, io.ErrUnexpectedEOF
	}
	return f, n, nil
}
// Dump stores the interface as a dsd formatted data structure.
// It is shorthand for DumpIndent with indentation disabled.
func Dump(t interface{}, format uint8) ([]byte, error) {
	return DumpIndent(t, format, "")
}
// DumpIndent stores the interface as a dsd formatted data structure with indentation, if available.
func DumpIndent(t interface{}, format uint8, indent string) ([]byte, error) {
data, err := dumpWithoutIdentifier(t, format, indent)
if err != nil {
return nil, err
}
// TODO: Find a better way to do this.
return append(varint.Pack8(format), data...), nil
}
// dumpWithoutIdentifier serializes t using the given format (AUTO resolves to
// the default serialization format) and returns the raw payload without the
// dsd format identifier prefix. indent is only honored by formats that
// support it (currently JSON).
func dumpWithoutIdentifier(t interface{}, format uint8, indent string) ([]byte, error) {
	format, ok := ValidateSerializationFormat(format)
	if !ok {
		return nil, ErrIncompatibleFormat
	}

	var data []byte
	var err error
	switch format {
	case RAW:
		// RAW can only dump values that already are a byte slice.
		var ok bool
		data, ok = t.([]byte)
		if !ok {
			return nil, ErrIncompatibleFormat
		}
	case JSON:
		// TODO: use SetEscapeHTML(false)
		if indent != "" {
			data, err = json.MarshalIndent(t, "", indent)
		} else {
			data, err = json.Marshal(t)
		}
		if err != nil {
			return nil, err
		}
	case YAML:
		data, err = yaml.Marshal(t)
		if err != nil {
			return nil, err
		}
	case CBOR:
		data, err = cbor.Marshal(t)
		if err != nil {
			return nil, err
		}
	case MsgPack:
		data, err = msgpack.Marshal(t)
		if err != nil {
			return nil, err
		}
	case GenCode:
		// GenCode requires the type to implement GenCodeCompatible.
		genCodeStruct, ok := t.(GenCodeCompatible)
		if !ok {
			return nil, errors.New("dsd: gencode is not supported by the given data structure")
		}
		data, err = genCodeStruct.GenCodeMarshal(nil)
		if err != nil {
			return nil, fmt.Errorf("dsd: failed to pack gencode struct: %w", err)
		}
	default:
		return nil, ErrIncompatibleFormat
	}

	return data, nil
}

327
dsd/dsd_test.go Normal file
View file

@ -0,0 +1,327 @@
//nolint:maligned,gocyclo,gocognit
package dsd
import (
"math/big"
"reflect"
"testing"
)
// SimpleTestStruct is used for testing.
type SimpleTestStruct struct {
	S string
	B byte
}

// ComplexTestStruct exercises (de)serialization of all basic field kinds:
// signed and unsigned integers, big.Int, strings, byte slices, maps, and
// pointers to each of them.
type ComplexTestStruct struct {
	I    int
	I8   int8
	I16  int16
	I32  int32
	I64  int64
	UI   uint
	UI8  uint8
	UI16 uint16
	UI32 uint32
	UI64 uint64
	BI   *big.Int
	S    string
	Sp   *string
	Sa   []string
	Sap  *[]string
	B    byte
	Bp   *byte
	Ba   []byte
	Bap  *[]byte
	M    map[string]string
	Mp   *map[string]string
}

// GenCodeTestStruct mirrors ComplexTestStruct, restricted to the field kinds
// the gencode serializer handles (no int/uint, big.Int, or maps — compare the
// generated methods in gencode_test.go).
type GenCodeTestStruct struct {
	I8   int8
	I16  int16
	I32  int32
	I64  int64
	UI8  uint8
	UI16 uint16
	UI32 uint32
	UI64 uint64
	S    string
	Sp   *string
	Sa   []string
	Sap  *[]string
	B    byte
	Bp   *byte
	Ba   []byte
	Bap  *[]byte
}
// Shared test subjects, round-tripped through every format by TestConversion.
var (
	simpleSubject = &SimpleTestStruct{
		"a",
		0x01,
	}

	// Targets for the pointer fields of the subjects below.
	bString       = "b"
	bBytes   byte = 0x02

	complexSubject = &ComplexTestStruct{
		-1,
		-2,
		-3,
		-4,
		-5,
		1,
		2,
		3,
		4,
		5,
		big.NewInt(6),
		"a",
		&bString,
		[]string{"c", "d", "e"},
		&[]string{"f", "g", "h"},
		0x01,
		&bBytes,
		[]byte{0x03, 0x04, 0x05},
		&[]byte{0x05, 0x06, 0x07},
		map[string]string{
			"a": "b",
			"c": "d",
			"e": "f",
		},
		&map[string]string{
			"g": "h",
			"i": "j",
			"k": "l",
		},
	}

	genCodeSubject = &GenCodeTestStruct{
		-2,
		-3,
		-4,
		-5,
		2,
		3,
		4,
		5,
		"a",
		&bString,
		[]string{"c", "d", "e"},
		&[]string{"f", "g", "h"},
		0x01,
		&bBytes,
		[]byte{0x03, 0x04, 0x05},
		&[]byte{0x05, 0x06, 0x07},
	}
)
// TestConversion dumps and re-loads the test subjects through every supported
// serialization format, with and without compression, and verifies that the
// round trip restores every field.
func TestConversion(t *testing.T) { //nolint:maintidx
	t.Parallel()

	// AUTO means "no compression" here: the dump path switches on it below.
	compressionFormats := []uint8{AUTO, GZIP}
	formats := []uint8{JSON, CBOR, MsgPack}

	for _, compression := range compressionFormats {
		for _, format := range formats {
			// simple
			var b []byte
			var err error
			if compression != AUTO {
				b, err = DumpAndCompress(simpleSubject, format, compression)
			} else {
				b, err = Dump(simpleSubject, format)
			}
			if err != nil {
				t.Fatalf("Dump error (simple struct): %s", err)
			}

			si := &SimpleTestStruct{}
			_, err = Load(b, si)
			if err != nil {
				t.Fatalf("Load error (simple struct): %s", err)
			}

			if !reflect.DeepEqual(simpleSubject, si) {
				t.Errorf("Load (simple struct): subject does not match loaded object")
				t.Errorf("Encoded: %v", string(b))
				t.Errorf("Compared: %v == %v", simpleSubject, si)
			}

			// complex
			if compression != AUTO {
				b, err = DumpAndCompress(complexSubject, format, compression)
			} else {
				b, err = Dump(complexSubject, format)
			}
			if err != nil {
				t.Fatalf("Dump error (complex struct): %s", err)
			}

			co := &ComplexTestStruct{}
			_, err = Load(b, co)
			if err != nil {
				t.Fatalf("Load error (complex struct): %s", err)
			}

			// Compare field by field for precise failure messages.
			if complexSubject.I != co.I {
				t.Errorf("Load (complex struct): struct.I is not equal (%v != %v)", complexSubject.I, co.I)
			}
			if complexSubject.I8 != co.I8 {
				t.Errorf("Load (complex struct): struct.I8 is not equal (%v != %v)", complexSubject.I8, co.I8)
			}
			if complexSubject.I16 != co.I16 {
				t.Errorf("Load (complex struct): struct.I16 is not equal (%v != %v)", complexSubject.I16, co.I16)
			}
			if complexSubject.I32 != co.I32 {
				t.Errorf("Load (complex struct): struct.I32 is not equal (%v != %v)", complexSubject.I32, co.I32)
			}
			if complexSubject.I64 != co.I64 {
				t.Errorf("Load (complex struct): struct.I64 is not equal (%v != %v)", complexSubject.I64, co.I64)
			}
			if complexSubject.UI != co.UI {
				t.Errorf("Load (complex struct): struct.UI is not equal (%v != %v)", complexSubject.UI, co.UI)
			}
			if complexSubject.UI8 != co.UI8 {
				t.Errorf("Load (complex struct): struct.UI8 is not equal (%v != %v)", complexSubject.UI8, co.UI8)
			}
			if complexSubject.UI16 != co.UI16 {
				t.Errorf("Load (complex struct): struct.UI16 is not equal (%v != %v)", complexSubject.UI16, co.UI16)
			}
			if complexSubject.UI32 != co.UI32 {
				t.Errorf("Load (complex struct): struct.UI32 is not equal (%v != %v)", complexSubject.UI32, co.UI32)
			}
			if complexSubject.UI64 != co.UI64 {
				t.Errorf("Load (complex struct): struct.UI64 is not equal (%v != %v)", complexSubject.UI64, co.UI64)
			}
			if complexSubject.BI.Cmp(co.BI) != 0 {
				t.Errorf("Load (complex struct): struct.BI is not equal (%v != %v)", complexSubject.BI, co.BI)
			}
			if complexSubject.S != co.S {
				t.Errorf("Load (complex struct): struct.S is not equal (%v != %v)", complexSubject.S, co.S)
			}
			if !reflect.DeepEqual(complexSubject.Sp, co.Sp) {
				t.Errorf("Load (complex struct): struct.Sp is not equal (%v != %v)", complexSubject.Sp, co.Sp)
			}
			if !reflect.DeepEqual(complexSubject.Sa, co.Sa) {
				t.Errorf("Load (complex struct): struct.Sa is not equal (%v != %v)", complexSubject.Sa, co.Sa)
			}
			if !reflect.DeepEqual(complexSubject.Sap, co.Sap) {
				t.Errorf("Load (complex struct): struct.Sap is not equal (%v != %v)", complexSubject.Sap, co.Sap)
			}
			if complexSubject.B != co.B {
				t.Errorf("Load (complex struct): struct.B is not equal (%v != %v)", complexSubject.B, co.B)
			}
			if !reflect.DeepEqual(complexSubject.Bp, co.Bp) {
				t.Errorf("Load (complex struct): struct.Bp is not equal (%v != %v)", complexSubject.Bp, co.Bp)
			}
			if !reflect.DeepEqual(complexSubject.Ba, co.Ba) {
				t.Errorf("Load (complex struct): struct.Ba is not equal (%v != %v)", complexSubject.Ba, co.Ba)
			}
			if !reflect.DeepEqual(complexSubject.Bap, co.Bap) {
				t.Errorf("Load (complex struct): struct.Bap is not equal (%v != %v)", complexSubject.Bap, co.Bap)
			}
			if !reflect.DeepEqual(complexSubject.M, co.M) {
				t.Errorf("Load (complex struct): struct.M is not equal (%v != %v)", complexSubject.M, co.M)
			}
			if !reflect.DeepEqual(complexSubject.Mp, co.Mp) {
				t.Errorf("Load (complex struct): struct.Mp is not equal (%v != %v)", complexSubject.Mp, co.Mp)
			}
		}

		// test all formats
		simplifiedFormatTesting := []uint8{JSON, CBOR, MsgPack, GenCode}
		for _, format := range simplifiedFormatTesting {
			// simple
			var b []byte
			var err error
			if compression != AUTO {
				b, err = DumpAndCompress(simpleSubject, format, compression)
			} else {
				b, err = Dump(simpleSubject, format)
			}
			if err != nil {
				t.Fatalf("Dump error (simple struct): %s", err)
			}

			si := &SimpleTestStruct{}
			_, err = Load(b, si)
			if err != nil {
				t.Fatalf("Load error (simple struct): %s", err)
			}

			if !reflect.DeepEqual(simpleSubject, si) {
				t.Errorf("Load (simple struct): subject does not match loaded object")
				t.Errorf("Encoded: %v", string(b))
				t.Errorf("Compared: %v == %v", simpleSubject, si)
			}

			// complex
			b, err = DumpAndCompress(genCodeSubject, format, compression)
			if err != nil {
				t.Fatalf("Dump error (complex struct): %s", err)
			}

			co := &GenCodeTestStruct{}
			_, err = Load(b, co)
			if err != nil {
				t.Fatalf("Load error (complex struct): %s", err)
			}

			if genCodeSubject.I8 != co.I8 {
				t.Errorf("Load (complex struct): struct.I8 is not equal (%v != %v)", genCodeSubject.I8, co.I8)
			}
			if genCodeSubject.I16 != co.I16 {
				t.Errorf("Load (complex struct): struct.I16 is not equal (%v != %v)", genCodeSubject.I16, co.I16)
			}
			if genCodeSubject.I32 != co.I32 {
				t.Errorf("Load (complex struct): struct.I32 is not equal (%v != %v)", genCodeSubject.I32, co.I32)
			}
			if genCodeSubject.I64 != co.I64 {
				t.Errorf("Load (complex struct): struct.I64 is not equal (%v != %v)", genCodeSubject.I64, co.I64)
			}
			if genCodeSubject.UI8 != co.UI8 {
				t.Errorf("Load (complex struct): struct.UI8 is not equal (%v != %v)", genCodeSubject.UI8, co.UI8)
			}
			if genCodeSubject.UI16 != co.UI16 {
				t.Errorf("Load (complex struct): struct.UI16 is not equal (%v != %v)", genCodeSubject.UI16, co.UI16)
			}
			if genCodeSubject.UI32 != co.UI32 {
				t.Errorf("Load (complex struct): struct.UI32 is not equal (%v != %v)", genCodeSubject.UI32, co.UI32)
			}
			if genCodeSubject.UI64 != co.UI64 {
				t.Errorf("Load (complex struct): struct.UI64 is not equal (%v != %v)", genCodeSubject.UI64, co.UI64)
			}
			if genCodeSubject.S != co.S {
				t.Errorf("Load (complex struct): struct.S is not equal (%v != %v)", genCodeSubject.S, co.S)
			}
			if !reflect.DeepEqual(genCodeSubject.Sp, co.Sp) {
				t.Errorf("Load (complex struct): struct.Sp is not equal (%v != %v)", genCodeSubject.Sp, co.Sp)
			}
			if !reflect.DeepEqual(genCodeSubject.Sa, co.Sa) {
				t.Errorf("Load (complex struct): struct.Sa is not equal (%v != %v)", genCodeSubject.Sa, co.Sa)
			}
			if !reflect.DeepEqual(genCodeSubject.Sap, co.Sap) {
				t.Errorf("Load (complex struct): struct.Sap is not equal (%v != %v)", genCodeSubject.Sap, co.Sap)
			}
			if genCodeSubject.B != co.B {
				t.Errorf("Load (complex struct): struct.B is not equal (%v != %v)", genCodeSubject.B, co.B)
			}
			if !reflect.DeepEqual(genCodeSubject.Bp, co.Bp) {
				t.Errorf("Load (complex struct): struct.Bp is not equal (%v != %v)", genCodeSubject.Bp, co.Bp)
			}
			if !reflect.DeepEqual(genCodeSubject.Ba, co.Ba) {
				t.Errorf("Load (complex struct): struct.Ba is not equal (%v != %v)", genCodeSubject.Ba, co.Ba)
			}
			if !reflect.DeepEqual(genCodeSubject.Bap, co.Bap) {
				t.Errorf("Load (complex struct): struct.Bap is not equal (%v != %v)", genCodeSubject.Bap, co.Bap)
			}
		}
	}
}

73
dsd/format.go Normal file
View file

@ -0,0 +1,73 @@
package dsd
import "errors"
// Errors.
var (
	// ErrIncompatibleFormat is returned when a format cannot be used for an operation.
	ErrIncompatibleFormat = errors.New("dsd: format is incompatible with operation")
	// ErrIsRaw is returned when attempting to load RAW data into a structure.
	ErrIsRaw = errors.New("dsd: given data is in raw format")
	// ErrUnknownFormat is returned when a format identifier is not recognized.
	ErrUnknownFormat = errors.New("dsd: format is unknown")
)

// Format types.
// The identifiers are the ASCII codes of a mnemonic letter, noted behind each value.
const (
	AUTO = 0

	// Serialization types.
	RAW     = 1
	CBOR    = 67 // C
	GenCode = 71 // G
	JSON    = 74 // J
	MsgPack = 77 // M
	YAML    = 89 // Y

	// Compression types.
	GZIP = 90 // Z

	// Special types.
	LIST = 76 // L
)

// Default Formats.
var (
	DefaultSerializationFormat uint8 = JSON
	DefaultCompressionFormat   uint8 = GZIP
)

// ValidateSerializationFormat validates if the format is for serialization,
// and returns the validated format as well as the result of the validation.
// If called on the AUTO format, it returns the default serialization format.
func ValidateSerializationFormat(format uint8) (validatedFormat uint8, ok bool) {
	switch format {
	case AUTO:
		return DefaultSerializationFormat, true
	case RAW, CBOR, GenCode, JSON, YAML, MsgPack:
		// Every concrete serialization format validates to itself.
		return format, true
	default:
		return 0, false
	}
}

// ValidateCompressionFormat validates if the format is for compression,
// and returns the validated format as well as the result of the validation.
// If called on the AUTO format, it returns the default compression format.
func ValidateCompressionFormat(format uint8) (validatedFormat uint8, ok bool) {
	if format == AUTO {
		return DefaultCompressionFormat, true
	}
	if format == GZIP {
		return format, true
	}
	return 0, false
}

824
dsd/gencode_test.go Normal file
View file

@ -0,0 +1,824 @@
//nolint:nakedret,unconvert,gocognit,wastedassign,gofumpt
package dsd
// Size returns the number of bytes needed to gencode-marshal the struct:
// the varint-encoded length of S, the bytes of S, and one byte for B.
// Machine-generated from tests.gencode — do not edit manually.
func (d *SimpleTestStruct) Size() (s uint64) {
	{
		l := uint64(len(d.S))

		{
			// Count the varint bytes needed for the length prefix.
			t := l
			for t >= 0x80 {
				t >>= 7
				s++
			}
			s++
		}
		s += l
	}
	s++
	return
}
// GenCodeMarshal gencode-marshals the struct into buf, reusing it when its
// capacity suffices and allocating a new slice otherwise.
// Machine-generated from tests.gencode — do not edit manually.
func (d *SimpleTestStruct) GenCodeMarshal(buf []byte) ([]byte, error) {
	size := d.Size()
	{
		if uint64(cap(buf)) >= size {
			buf = buf[:size]
		} else {
			buf = make([]byte, size)
		}
	}
	i := uint64(0)

	{
		// S: varint length prefix followed by the raw string bytes.
		l := uint64(len(d.S))

		{
			t := uint64(l)
			for t >= 0x80 {
				buf[i+0] = byte(t) | 0x80
				t >>= 7
				i++
			}
			buf[i+0] = byte(t)
			i++
		}
		copy(buf[i+0:], d.S)
		i += l
	}
	{
		buf[i+0] = d.B
	}
	return buf[:i+1], nil
}
// GenCodeUnmarshal gencode-unmarshals the struct from buf and returns the
// number of bytes read. NOTE(review): there are no bounds checks — truncated
// input panics; callers must supply a complete buffer.
// Machine-generated from tests.gencode — do not edit manually.
func (d *SimpleTestStruct) GenCodeUnmarshal(buf []byte) (uint64, error) {
	i := uint64(0)

	{
		// S: decode the varint length prefix, then take that many bytes.
		l := uint64(0)

		{
			bs := uint8(7)
			t := uint64(buf[i+0] & 0x7F)
			for buf[i+0]&0x80 == 0x80 {
				i++
				t |= uint64(buf[i+0]&0x7F) << bs
				bs += 7
			}
			i++
			l = t
		}
		d.S = string(buf[i+0 : i+0+l])
		i += l
	}
	{
		d.B = buf[i+0]
	}
	return i + 1, nil
}
// Size returns the number of bytes needed to gencode-marshal the struct.
// Fixed-width integer fields contribute the constant 35 added at the end;
// variable-length fields (strings, slices, optional pointers) are summed up
// with their varint length prefixes.
// Machine-generated from tests.gencode — do not edit manually.
func (d *GenCodeTestStruct) Size() (s uint64) {
	{
		l := uint64(len(d.S))

		{
			t := l
			for t >= 0x80 {
				t >>= 7
				s++
			}
			s++
		}
		s += l
	}
	{
		if d.Sp != nil {
			{
				l := uint64(len((*d.Sp)))

				{
					t := l
					for t >= 0x80 {
						t >>= 7
						s++
					}
					s++
				}
				s += l
			}
			s += 0
		}
	}
	{
		l := uint64(len(d.Sa))

		{
			t := l
			for t >= 0x80 {
				t >>= 7
				s++
			}
			s++
		}
		for k0 := range d.Sa {
			{
				l := uint64(len(d.Sa[k0]))

				{
					t := l
					for t >= 0x80 {
						t >>= 7
						s++
					}
					s++
				}
				s += l
			}
		}
	}
	{
		if d.Sap != nil {
			{
				l := uint64(len((*d.Sap)))

				{
					t := l
					for t >= 0x80 {
						t >>= 7
						s++
					}
					s++
				}
				for k0 := range *d.Sap {
					{
						l := uint64(len((*d.Sap)[k0]))

						{
							t := l
							for t >= 0x80 {
								t >>= 7
								s++
							}
							s++
						}
						s += l
					}
				}
			}
			s += 0
		}
	}
	{
		if d.Bp != nil {
			s++
		}
	}
	{
		l := uint64(len(d.Ba))

		{
			t := l
			for t >= 0x80 {
				t >>= 7
				s++
			}
			s++
		}
		s += l
	}
	{
		if d.Bap != nil {
			{
				l := uint64(len((*d.Bap)))

				{
					t := l
					for t >= 0x80 {
						t >>= 7
						s++
					}
					s++
				}
				s += l
			}
			s += 0
		}
	}
	s += 35
	return
}
// GenCodeMarshal gencode-marshals the struct into buf, reusing it when its
// capacity suffices. Fixed-width integer fields are written little-endian at
// constant offsets; variable-length fields follow, addressed via the running
// index i plus a constant offset.
// Machine-generated from tests.gencode — do not edit manually.
func (d *GenCodeTestStruct) GenCodeMarshal(buf []byte) ([]byte, error) { //nolint:maintidx
	size := d.Size()
	{
		if uint64(cap(buf)) >= size {
			buf = buf[:size]
		} else {
			buf = make([]byte, size)
		}
	}
	i := uint64(0)

	{
		buf[0+0] = byte(d.I8 >> 0)
	}
	{
		buf[0+1] = byte(d.I16 >> 0)
		buf[1+1] = byte(d.I16 >> 8)
	}
	{
		buf[0+3] = byte(d.I32 >> 0)
		buf[1+3] = byte(d.I32 >> 8)
		buf[2+3] = byte(d.I32 >> 16)
		buf[3+3] = byte(d.I32 >> 24)
	}
	{
		buf[0+7] = byte(d.I64 >> 0)
		buf[1+7] = byte(d.I64 >> 8)
		buf[2+7] = byte(d.I64 >> 16)
		buf[3+7] = byte(d.I64 >> 24)
		buf[4+7] = byte(d.I64 >> 32)
		buf[5+7] = byte(d.I64 >> 40)
		buf[6+7] = byte(d.I64 >> 48)
		buf[7+7] = byte(d.I64 >> 56)
	}
	{
		buf[0+15] = byte(d.UI8 >> 0)
	}
	{
		buf[0+16] = byte(d.UI16 >> 0)
		buf[1+16] = byte(d.UI16 >> 8)
	}
	{
		buf[0+18] = byte(d.UI32 >> 0)
		buf[1+18] = byte(d.UI32 >> 8)
		buf[2+18] = byte(d.UI32 >> 16)
		buf[3+18] = byte(d.UI32 >> 24)
	}
	{
		buf[0+22] = byte(d.UI64 >> 0)
		buf[1+22] = byte(d.UI64 >> 8)
		buf[2+22] = byte(d.UI64 >> 16)
		buf[3+22] = byte(d.UI64 >> 24)
		buf[4+22] = byte(d.UI64 >> 32)
		buf[5+22] = byte(d.UI64 >> 40)
		buf[6+22] = byte(d.UI64 >> 48)
		buf[7+22] = byte(d.UI64 >> 56)
	}
	{
		l := uint64(len(d.S))

		{
			t := uint64(l)
			for t >= 0x80 {
				buf[i+30] = byte(t) | 0x80
				t >>= 7
				i++
			}
			buf[i+30] = byte(t)
			i++
		}
		copy(buf[i+30:], d.S)
		i += l
	}
	{
		// Optional field: a leading 0/1 byte marks nil / present.
		if d.Sp == nil {
			buf[i+30] = 0
		} else {
			buf[i+30] = 1
			{
				l := uint64(len((*d.Sp)))

				{
					t := uint64(l)
					for t >= 0x80 {
						buf[i+31] = byte(t) | 0x80
						t >>= 7
						i++
					}
					buf[i+31] = byte(t)
					i++
				}
				copy(buf[i+31:], (*d.Sp))
				i += l
			}
			i += 0
		}
	}
	{
		l := uint64(len(d.Sa))

		{
			t := uint64(l)
			for t >= 0x80 {
				buf[i+31] = byte(t) | 0x80
				t >>= 7
				i++
			}
			buf[i+31] = byte(t)
			i++
		}
		for k0 := range d.Sa {
			{
				l := uint64(len(d.Sa[k0]))

				{
					t := uint64(l)
					for t >= 0x80 {
						buf[i+31] = byte(t) | 0x80
						t >>= 7
						i++
					}
					buf[i+31] = byte(t)
					i++
				}
				copy(buf[i+31:], d.Sa[k0])
				i += l
			}
		}
	}
	{
		if d.Sap == nil {
			buf[i+31] = 0
		} else {
			buf[i+31] = 1
			{
				l := uint64(len((*d.Sap)))

				{
					t := uint64(l)
					for t >= 0x80 {
						buf[i+32] = byte(t) | 0x80
						t >>= 7
						i++
					}
					buf[i+32] = byte(t)
					i++
				}
				for k0 := range *d.Sap {
					{
						l := uint64(len((*d.Sap)[k0]))

						{
							t := uint64(l)
							for t >= 0x80 {
								buf[i+32] = byte(t) | 0x80
								t >>= 7
								i++
							}
							buf[i+32] = byte(t)
							i++
						}
						copy(buf[i+32:], (*d.Sap)[k0])
						i += l
					}
				}
			}
			i += 0
		}
	}
	{
		buf[i+32] = d.B
	}
	{
		if d.Bp == nil {
			buf[i+33] = 0
		} else {
			buf[i+33] = 1
			{
				buf[i+34] = (*d.Bp)
			}
			i++
		}
	}
	{
		l := uint64(len(d.Ba))

		{
			t := uint64(l)
			for t >= 0x80 {
				buf[i+34] = byte(t) | 0x80
				t >>= 7
				i++
			}
			buf[i+34] = byte(t)
			i++
		}
		copy(buf[i+34:], d.Ba)
		i += l
	}
	{
		if d.Bap == nil {
			buf[i+34] = 0
		} else {
			buf[i+34] = 1
			{
				l := uint64(len((*d.Bap)))

				{
					t := uint64(l)
					for t >= 0x80 {
						buf[i+35] = byte(t) | 0x80
						t >>= 7
						i++
					}
					buf[i+35] = byte(t)
					i++
				}
				copy(buf[i+35:], (*d.Bap))
				i += l
			}
			i += 0
		}
	}
	return buf[:i+35], nil
}
// GenCodeUnmarshal gencode-unmarshals the struct from buf, mirroring
// GenCodeMarshal's layout, and returns the number of bytes read.
// NOTE(review): there are no bounds checks — truncated input panics; callers
// must supply a complete buffer.
// Machine-generated from tests.gencode — do not edit manually.
func (d *GenCodeTestStruct) GenCodeUnmarshal(buf []byte) (uint64, error) { //nolint:maintidx
	i := uint64(0)

	{
		d.I8 = 0 | (int8(buf[i+0+0]) << 0)
	}
	{
		d.I16 = 0 | (int16(buf[i+0+1]) << 0) | (int16(buf[i+1+1]) << 8)
	}
	{
		d.I32 = 0 | (int32(buf[i+0+3]) << 0) | (int32(buf[i+1+3]) << 8) | (int32(buf[i+2+3]) << 16) | (int32(buf[i+3+3]) << 24)
	}
	{
		d.I64 = 0 | (int64(buf[i+0+7]) << 0) | (int64(buf[i+1+7]) << 8) | (int64(buf[i+2+7]) << 16) | (int64(buf[i+3+7]) << 24) | (int64(buf[i+4+7]) << 32) | (int64(buf[i+5+7]) << 40) | (int64(buf[i+6+7]) << 48) | (int64(buf[i+7+7]) << 56)
	}
	{
		d.UI8 = 0 | (uint8(buf[i+0+15]) << 0)
	}
	{
		d.UI16 = 0 | (uint16(buf[i+0+16]) << 0) | (uint16(buf[i+1+16]) << 8)
	}
	{
		d.UI32 = 0 | (uint32(buf[i+0+18]) << 0) | (uint32(buf[i+1+18]) << 8) | (uint32(buf[i+2+18]) << 16) | (uint32(buf[i+3+18]) << 24)
	}
	{
		d.UI64 = 0 | (uint64(buf[i+0+22]) << 0) | (uint64(buf[i+1+22]) << 8) | (uint64(buf[i+2+22]) << 16) | (uint64(buf[i+3+22]) << 24) | (uint64(buf[i+4+22]) << 32) | (uint64(buf[i+5+22]) << 40) | (uint64(buf[i+6+22]) << 48) | (uint64(buf[i+7+22]) << 56)
	}
	{
		l := uint64(0)

		{
			bs := uint8(7)
			t := uint64(buf[i+30] & 0x7F)
			for buf[i+30]&0x80 == 0x80 {
				i++
				t |= uint64(buf[i+30]&0x7F) << bs
				bs += 7
			}
			i++
			l = t
		}
		d.S = string(buf[i+30 : i+30+l])
		i += l
	}
	{
		// Optional field: a leading 0/1 byte marks nil / present.
		if buf[i+30] == 1 {
			if d.Sp == nil {
				d.Sp = new(string)
			}
			{
				l := uint64(0)

				{
					bs := uint8(7)
					t := uint64(buf[i+31] & 0x7F)
					for buf[i+31]&0x80 == 0x80 {
						i++
						t |= uint64(buf[i+31]&0x7F) << bs
						bs += 7
					}
					i++
					l = t
				}
				(*d.Sp) = string(buf[i+31 : i+31+l])
				i += l
			}
			i += 0
		} else {
			d.Sp = nil
		}
	}
	{
		l := uint64(0)

		{
			bs := uint8(7)
			t := uint64(buf[i+31] & 0x7F)
			for buf[i+31]&0x80 == 0x80 {
				i++
				t |= uint64(buf[i+31]&0x7F) << bs
				bs += 7
			}
			i++
			l = t
		}
		if uint64(cap(d.Sa)) >= l {
			d.Sa = d.Sa[:l]
		} else {
			d.Sa = make([]string, l)
		}
		for k0 := range d.Sa {
			{
				l := uint64(0)

				{
					bs := uint8(7)
					t := uint64(buf[i+31] & 0x7F)
					for buf[i+31]&0x80 == 0x80 {
						i++
						t |= uint64(buf[i+31]&0x7F) << bs
						bs += 7
					}
					i++
					l = t
				}
				d.Sa[k0] = string(buf[i+31 : i+31+l])
				i += l
			}
		}
	}
	{
		if buf[i+31] == 1 {
			if d.Sap == nil {
				d.Sap = new([]string)
			}
			{
				l := uint64(0)

				{
					bs := uint8(7)
					t := uint64(buf[i+32] & 0x7F)
					for buf[i+32]&0x80 == 0x80 {
						i++
						t |= uint64(buf[i+32]&0x7F) << bs
						bs += 7
					}
					i++
					l = t
				}
				if uint64(cap((*d.Sap))) >= l {
					(*d.Sap) = (*d.Sap)[:l]
				} else {
					(*d.Sap) = make([]string, l)
				}
				for k0 := range *d.Sap {
					{
						l := uint64(0)

						{
							bs := uint8(7)
							t := uint64(buf[i+32] & 0x7F)
							for buf[i+32]&0x80 == 0x80 {
								i++
								t |= uint64(buf[i+32]&0x7F) << bs
								bs += 7
							}
							i++
							l = t
						}
						(*d.Sap)[k0] = string(buf[i+32 : i+32+l])
						i += l
					}
				}
			}
			i += 0
		} else {
			d.Sap = nil
		}
	}
	{
		d.B = buf[i+32]
	}
	{
		if buf[i+33] == 1 {
			if d.Bp == nil {
				d.Bp = new(byte)
			}
			{
				(*d.Bp) = buf[i+34]
			}
			i++
		} else {
			d.Bp = nil
		}
	}
	{
		l := uint64(0)

		{
			bs := uint8(7)
			t := uint64(buf[i+34] & 0x7F)
			for buf[i+34]&0x80 == 0x80 {
				i++
				t |= uint64(buf[i+34]&0x7F) << bs
				bs += 7
			}
			i++
			l = t
		}
		if uint64(cap(d.Ba)) >= l {
			d.Ba = d.Ba[:l]
		} else {
			d.Ba = make([]byte, l)
		}
		copy(d.Ba, buf[i+34:])
		i += l
	}
	{
		if buf[i+34] == 1 {
			if d.Bap == nil {
				d.Bap = new([]byte)
			}
			{
				l := uint64(0)

				{
					bs := uint8(7)
					t := uint64(buf[i+35] & 0x7F)
					for buf[i+35]&0x80 == 0x80 {
						i++
						t |= uint64(buf[i+35]&0x7F) << bs
						bs += 7
					}
					i++
					l = t
				}
				if uint64(cap((*d.Bap))) >= l {
					(*d.Bap) = (*d.Bap)[:l]
				} else {
					(*d.Bap) = make([]byte, l)
				}
				copy((*d.Bap), buf[i+35:])
				i += l
			}
			i += 0
		} else {
			d.Bap = nil
		}
	}
	return i + 35, nil
}

178
dsd/http.go Normal file
View file

@ -0,0 +1,178 @@
package dsd
import (
"bytes"
"errors"
"fmt"
"io"
"net/http"
"strings"
)
// HTTP Related Errors.
var (
	// ErrMissingBody is returned when an http message is expected to carry a body, but does not.
	ErrMissingBody = errors.New("dsd: missing http body")
	// ErrMissingContentType is returned when an http message is missing the content type header.
	ErrMissingContentType = errors.New("dsd: missing http content type")
)

const (
	// httpHeaderContentType is the canonical name of the http content type header.
	httpHeaderContentType = "Content-Type"
)
// LoadFromHTTPRequest loads the data from the body into the given interface.
// The serialization format is derived from the request's Content-Type header.
func LoadFromHTTPRequest(r *http.Request, t interface{}) (format uint8, err error) {
	return loadFromHTTP(r.Body, r.Header.Get(httpHeaderContentType), t)
}
// LoadFromHTTPResponse loads the data from the body into the given interface.
// The serialization format is derived from the response's Content-Type header.
// Closing the body is left to the caller.
func LoadFromHTTPResponse(resp *http.Response, t interface{}) (format uint8, err error) {
	return loadFromHTTP(resp.Body, resp.Header.Get(httpHeaderContentType), t)
}
// loadFromHTTP reads the whole body and unmarshals it into t, choosing the
// serialization format from the given mime type.
func loadFromHTTP(body io.Reader, mimeType string, t interface{}) (format uint8, err error) {
	// Read full body.
	payload, readErr := io.ReadAll(body)
	if readErr != nil {
		return 0, fmt.Errorf("dsd: failed to read http body: %w", readErr)
	}

	// Load depending on mime type.
	return MimeLoad(payload, mimeType, t)
}
// RequestHTTPResponseFormat sets the Accept header to the given format and
// returns the mime type that was requested. It returns ErrIncompatibleFormat
// for formats without a registered mime type.
func RequestHTTPResponseFormat(r *http.Request, format uint8) (mimeType string, err error) {
	mt, known := FormatToMimeType[format]
	if !known {
		return "", ErrIncompatibleFormat
	}

	r.Header.Set("Accept", mt)
	return mt, nil
}
// DumpToHTTPRequest dumps the given data to the HTTP request using the given
// format. It also sets the Accept header to the same format.
func DumpToHTTPRequest(r *http.Request, t interface{}, format uint8) error {
	// Get mime type and set request format.
	mimeType, err := RequestHTTPResponseFormat(r, format)
	if err != nil {
		return err
	}

	// Serialize data.
	data, err := dumpWithoutIdentifier(t, format, "")
	if err != nil {
		return fmt.Errorf("dsd: failed to serialize: %w", err)
	}

	// Add data to request.
	// Use the shared header name constant (was a "Content-Type" literal) and
	// declare the body length so the transport need not use chunked encoding.
	r.Header.Set(httpHeaderContentType, mimeType)
	r.ContentLength = int64(len(data))
	r.Body = io.NopCloser(bytes.NewReader(data))
	return nil
}
// DumpToHTTPResponse dumps the given data to the HTTP response, using the
// format defined in the request's Accept header.
func DumpToHTTPResponse(w http.ResponseWriter, r *http.Request, t interface{}) error {
	// Serialize data based on accept header.
	data, mimeType, _, err := MimeDump(t, r.Header.Get("Accept"))
	if err != nil {
		return fmt.Errorf("dsd: failed to serialize: %w", err)
	}

	// Write data to response.
	// Use the shared header name constant (was a "Content-Type" literal).
	w.Header().Set(httpHeaderContentType, mimeType)
	_, err = w.Write(data)
	if err != nil {
		return fmt.Errorf("dsd: failed to write response: %w", err)
	}
	return nil
}
// MimeLoad loads the given data into the interface based on the given mime
// type accept header. It returns ErrIncompatibleFormat when the accept header
// matches no supported format.
func MimeLoad(data []byte, accept string, t interface{}) (format uint8, err error) {
	// Find format.
	format = FormatFromAccept(accept)
	if format == AUTO {
		// FormatFromAccept signals "no supported format" with AUTO; the magic
		// literal 0 was replaced with the named constant for consistency with
		// MimeDump.
		return 0, ErrIncompatibleFormat
	}

	// Load data.
	err = LoadAsFormat(data, format, t)
	return format, err
}
// MimeDump dumps the given interface based on the given mime type accept
// header. It returns the serialized data, the mime type that was actually
// used, and the corresponding dsd format.
func MimeDump(t any, accept string) (data []byte, mimeType string, format uint8, err error) {
	// Find format.
	format = FormatFromAccept(accept)
	if format == AUTO {
		return nil, "", 0, ErrIncompatibleFormat
	}

	// Report the chosen mime type back to the caller.
	// BUG FIX: the named return was previously never assigned, so callers such
	// as DumpToHTTPResponse always received "" and set an empty Content-Type.
	mimeType = FormatToMimeType[format]

	// Serialize and return.
	data, err = dumpWithoutIdentifier(t, format, "")
	return data, mimeType, format, err
}
// FormatFromAccept returns the format for the given accept definition.
// The accept parameter matches the format of the HTTP Accept header.
// Special cases, in this order:
//   - If accept is an empty string: returns default serialization format.
//   - If accept contains no supported format, but a wildcard: returns default serialization format.
//   - If accept contains no supported format, and no wildcard: returns AUTO format.
func FormatFromAccept(accept string) (format uint8) {
	if accept == "" {
		return DefaultSerializationFormat
	}

	wildcardSeen := false
	for _, entry := range strings.Split(accept, ",") {
		// Normalize: trim whitespace, drop parameters, keep only the subtype.
		entry = strings.TrimSpace(entry)
		entry, _, _ = strings.Cut(entry, ";")
		if strings.Contains(entry, "/") {
			_, entry, _ = strings.Cut(entry, "/")
		}
		entry = strings.ToLower(entry)

		// The first supported mime type wins.
		if f, supported := MimeTypeToFormat[entry]; supported {
			return f
		}

		// Remember wildcards as a fallback to the default format.
		if entry == "*" {
			wildcardSeen = true
		}
	}

	if wildcardSeen {
		return DefaultSerializationFormat
	}
	return AUTO
}
// Format and MimeType mappings.
// Note that YAML has both "yaml" and "yml" subtype aliases, so the reverse
// map is not a strict inverse of FormatToMimeType.
var (
	FormatToMimeType = map[uint8]string{
		CBOR:    "application/cbor",
		JSON:    "application/json",
		MsgPack: "application/msgpack",
		YAML:    "application/yaml",
	}
	MimeTypeToFormat = map[string]uint8{
		"cbor":    CBOR,
		"json":    JSON,
		"msgpack": MsgPack,
		"yaml":    YAML,
		"yml":     YAML,
	}
)

45
dsd/http_test.go Normal file
View file

@ -0,0 +1,45 @@
package dsd
import (
"mime"
"testing"
"github.com/stretchr/testify/assert"
)
// TestMimeTypes checks that the static mime type maps contain clean,
// parse-able entries and that FormatFromAccept resolves Accept headers
// according to its documented rules.
func TestMimeTypes(t *testing.T) {
	t.Parallel()

	// Test static maps.
	for _, mimeType := range FormatToMimeType {
		cleaned, _, err := mime.ParseMediaType(mimeType)
		assert.NoError(t, err, "mime type must be parse-able")
		assert.Equal(t, mimeType, cleaned, "mime type should be clean in map already")
	}
	for mimeType := range MimeTypeToFormat {
		cleaned, _, err := mime.ParseMediaType(mimeType)
		assert.NoError(t, err, "mime type must be parse-able")
		assert.Equal(t, mimeType, cleaned, "mime type should be clean in map already")
	}

	// Test assumptions.
	for accept, format := range map[string]uint8{
		"application/json, image/webp":       JSON,
		"image/webp, application/json":       JSON,
		"application/json;q=0.9, image/webp": JSON,
		"*":                                  DefaultSerializationFormat,
		"*/*":                                DefaultSerializationFormat,
		"text/yAMl":                          YAML,
		" * , yaml ":                         YAML,
		"yaml;charset ,*":                    YAML,
		"xml,*":                              DefaultSerializationFormat,
		"text/xml, text/other":               AUTO,
		"text/*":                             DefaultSerializationFormat,
		"yaml ;charset":                      AUTO, // Invalid mimetype format.
		"":                                   DefaultSerializationFormat,
		"x":                                  AUTO,
	} {
		derivedFormat := FormatFromAccept(accept)
		assert.Equal(t, format, derivedFormat, "assumption for %q should hold", accept)
	}
}

9
dsd/interfaces.go Normal file
View file

@ -0,0 +1,9 @@
package dsd
// GenCodeCompatible is an interface to identify and use gencode compatible structs.
type GenCodeCompatible interface {
	// GenCodeMarshal gencode-marshals the struct into the given byte slice,
	// or a new one if it is too small, and returns the used slice.
	GenCodeMarshal(buf []byte) ([]byte, error)
	// GenCodeUnmarshal gencode-unmarshals the struct from the given byte
	// slice and returns the number of bytes read.
	GenCodeUnmarshal(buf []byte) (uint64, error)
}

23
dsd/tests.gencode Normal file
View file

@ -0,0 +1,23 @@
struct SimpleTestStruct {
S string
B byte
}
struct GenCodeTestStruct {
I8 int8
I16 int16
I32 int32
I64 int64
UI8 uint8
UI16 uint16
UI32 uint32
UI64 uint64
S string
Sp *string
Sa []string
Sap *[]string
B byte
Bp *byte
Ba []byte
Bap *[]byte
}

26
go.mod Normal file
View file

@ -0,0 +1,26 @@
module github.com/safing/structures
go 1.21.1
toolchain go1.21.2
require (
github.com/fxamacker/cbor/v2 v2.7.0
github.com/ghodss/yaml v1.0.0
github.com/safing/portbase v0.19.5
github.com/stretchr/testify v1.8.4
github.com/vmihailenco/msgpack/v5 v5.4.1
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/kr/pretty v0.2.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/tevino/abool v1.2.0 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

34
go.sum Normal file
View file

@ -0,0 +1,34 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/safing/portbase v0.19.5 h1:3/8odzlvb629tHPwdj/sthSeJcwZHYrqA6YuvNUZzNc=
github.com/safing/portbase v0.19.5/go.mod h1:Qrh3ck+7VZloFmnozCs9Hj8godhJAi55cmiDiC7BwTc=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tevino/abool v1.2.0 h1:heAkClL8H6w+mK5md9dzsuohKeXHUpY7Vw0ZCKW+huA=
github.com/tevino/abool v1.2.0/go.mod h1:qc66Pna1RiIsPa7O4Egxxs9OqkuxDX55zznh9K07Tzg=
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

48
varint/helpers.go Normal file
View file

@ -0,0 +1,48 @@
package varint
import "errors"
// PrependLength prepends the varint encoded length of the byte slice to itself.
func PrependLength(data []byte) []byte {
	prefix := Pack64(uint64(len(data)))
	return append(prefix, data...)
}
// GetNextBlock extracts a length-prefixed block from the beginning of the
// given byte slice. It returns the block's payload, the total number of bytes
// consumed (prefix plus payload), and an error if the data is too short.
func GetNextBlock(data []byte) ([]byte, int, error) {
	l, n, err := Unpack64(data)
	if err != nil {
		return nil, 0, err
	}
	length := int(l)
	// Guard against int overflow: a huge uint64 length wraps to a negative
	// int, which previously slipped past the bounds check below and made the
	// final slice expression panic.
	if length < 0 || length > len(data)-n {
		return nil, 0, errors.New("varint: not enough data for given block length")
	}
	totalLength := length + n
	return data[n:totalLength], totalLength, nil
}
// EncodedSize returns the size required to varint-encode an uint.
// Every 7 bits of payload beyond the first group add one byte.
func EncodedSize(n uint64) (size int) {
	size = 1
	for n >= 0x80 {
		n >>= 7
		size++
	}
	return size
}

97
varint/varint.go Normal file
View file

@ -0,0 +1,97 @@
package varint
import (
"encoding/binary"
"errors"
)
// ErrBufTooSmall is returned when there is not enough data for parsing a varint.
// It signals an incomplete encoding: more bytes are needed to finish the varint.
var ErrBufTooSmall = errors.New("varint: buf too small")
// Pack8 packs a uint8 into a VarInt.
func Pack8(n uint8) []byte {
if n < 128 {
return []byte{n}
}
return []byte{n, 0x01}
}
// Pack16 packs a uint16 into a VarInt.
func Pack16(n uint16) []byte {
buf := make([]byte, 3)
w := binary.PutUvarint(buf, uint64(n))
return buf[:w]
}
// Pack32 packs a uint32 into a VarInt.
func Pack32(n uint32) []byte {
buf := make([]byte, 5)
w := binary.PutUvarint(buf, uint64(n))
return buf[:w]
}
// Pack64 packs a uint64 into a VarInt.
func Pack64(n uint64) []byte {
buf := make([]byte, 10)
w := binary.PutUvarint(buf, n)
return buf[:w]
}
// Unpack8 unpacks a VarInt into a uint8. It returns the extracted int, how many bytes were used and an error.
func Unpack8(blob []byte) (uint8, int, error) {
	if len(blob) < 1 {
		return 0, 0, ErrBufTooSmall
	}
	// Single-byte encoding: values below 128.
	if blob[0] < 128 {
		return blob[0], 1, nil
	}
	// Two-byte encoding: the continuation bit is set, so a second byte
	// must be present.
	if len(blob) < 2 {
		return 0, 0, ErrBufTooSmall
	}
	if blob[1] != 0x01 {
		return 0, 0, errors.New("varint: encoded integer greater than 255 (uint8)")
	}
	// For values 128-255 the first byte already equals the value, because
	// (b & 0x7F) | (0x01 << 7) == b. Two bytes were consumed here — the
	// previous implementation incorrectly reported 1, which would make
	// callers re-read the trailing 0x01 byte.
	return blob[0], 2, nil
}
// Unpack16 unpacks a VarInt into a uint16. It returns the extracted int, how many bytes were used and an error.
func Unpack16(blob []byte) (uint16, int, error) {
	value, read := binary.Uvarint(blob)
	switch {
	case read == 0:
		// Incomplete varint.
		return 0, 0, ErrBufTooSmall
	case read < 0:
		// Encoding overflows 64 bits.
		return 0, 0, errors.New("varint: encoded integer greater than 18446744073709551615 (uint64)")
	case value > 65535:
		// Decoded fine, but does not fit into a uint16.
		return 0, 0, errors.New("varint: encoded integer greater than 65535 (uint16)")
	}
	return uint16(value), read, nil
}
// Unpack32 unpacks a VarInt into a uint32. It returns the extracted int, how many bytes were used and an error.
func Unpack32(blob []byte) (uint32, int, error) {
	value, read := binary.Uvarint(blob)
	switch {
	case read == 0:
		// Incomplete varint.
		return 0, 0, ErrBufTooSmall
	case read < 0:
		// Encoding overflows 64 bits.
		return 0, 0, errors.New("varint: encoded integer greater than 18446744073709551615 (uint64)")
	case value > 4294967295:
		// Decoded fine, but does not fit into a uint32.
		return 0, 0, errors.New("varint: encoded integer greater than 4294967295 (uint32)")
	}
	return uint32(value), read, nil
}
// Unpack64 unpacks a VarInt into a uint64. It returns the extracted int, how many bytes were used and an error.
func Unpack64(blob []byte) (uint64, int, error) {
	value, read := binary.Uvarint(blob)
	// A positive read count is the success path.
	if read > 0 {
		return value, read, nil
	}
	if read == 0 {
		// Incomplete varint.
		return 0, 0, ErrBufTooSmall
	}
	// Negative read count: encoding overflows 64 bits.
	return 0, 0, errors.New("varint: encoded integer greater than 18446744073709551615 (uint64)")
}

141
varint/varint_test.go Normal file
View file

@ -0,0 +1,141 @@
//nolint:gocognit
package varint
import (
"bytes"
"testing"
)
// TestConversion checks that packing and unpacking round-trips correctly for
// every integer width at the varint encoding-size boundaries.
//
// Fix over the previous version: the Pack assertions re-tested the stale err
// value left over from the preceding Unpack call, which double-reported
// unpack failures as pack failures; the Pack checks now compare bytes only.
func TestConversion(t *testing.T) {
	t.Parallel()
	subjects := []struct {
		intType uint8
		bytes   []byte
		integer uint64
	}{
		{8, []byte{0x00}, 0},
		{8, []byte{0x01}, 1},
		{8, []byte{0x7F}, 127},
		{8, []byte{0x80, 0x01}, 128},
		{8, []byte{0xFF, 0x01}, 255},
		{16, []byte{0x80, 0x02}, 256},
		{16, []byte{0xFF, 0x7F}, 16383},
		{16, []byte{0x80, 0x80, 0x01}, 16384},
		{16, []byte{0xFF, 0xFF, 0x03}, 65535},
		{32, []byte{0x80, 0x80, 0x04}, 65536},
		{32, []byte{0xFF, 0xFF, 0x7F}, 2097151},
		{32, []byte{0x80, 0x80, 0x80, 0x01}, 2097152},
		{32, []byte{0xFF, 0xFF, 0xFF, 0x07}, 16777215},
		{32, []byte{0x80, 0x80, 0x80, 0x08}, 16777216},
		{32, []byte{0xFF, 0xFF, 0xFF, 0x7F}, 268435455},
		{32, []byte{0x80, 0x80, 0x80, 0x80, 0x01}, 268435456},
		{32, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x0F}, 4294967295},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x10}, 4294967296},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 34359738367},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 34359738368},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F}, 1099511627775},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x20}, 1099511627776},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 4398046511103},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 4398046511104},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F}, 281474976710655},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x40}, 281474976710656},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 562949953421311},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 562949953421312},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 72057594037927935},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 72057594037927936},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 9223372036854775807},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 9223372036854775808},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, 18446744073709551615},
	}
	for _, subject := range subjects {
		// Every subject is valid for the 64-bit functions.
		actualInteger, _, err := Unpack64(subject.bytes)
		if err != nil || actualInteger != subject.integer {
			t.Errorf("Unpack64 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
		}
		if actualBytes := Pack64(subject.integer); !bytes.Equal(actualBytes, subject.bytes) {
			t.Errorf("Pack64 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
		}
		if subject.intType <= 32 {
			actualInteger, _, err := Unpack32(subject.bytes)
			if err != nil || actualInteger != uint32(subject.integer) {
				t.Errorf("Unpack32 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
			}
			if actualBytes := Pack32(uint32(subject.integer)); !bytes.Equal(actualBytes, subject.bytes) {
				t.Errorf("Pack32 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
			}
		}
		if subject.intType <= 16 {
			actualInteger, _, err := Unpack16(subject.bytes)
			if err != nil || actualInteger != uint16(subject.integer) {
				t.Errorf("Unpack16 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
			}
			if actualBytes := Pack16(uint16(subject.integer)); !bytes.Equal(actualBytes, subject.bytes) {
				t.Errorf("Pack16 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
			}
		}
		if subject.intType <= 8 {
			actualInteger, _, err := Unpack8(subject.bytes)
			if err != nil || actualInteger != uint8(subject.integer) {
				t.Errorf("Unpack8 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
			}
			if actualBytes := Pack8(uint8(subject.integer)); !bytes.Equal(actualBytes, subject.bytes) {
				t.Errorf("Pack8 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
			}
		}
	}
}
// TestFails feeds malformed or out-of-range encodings to the Unpack functions
// and verifies that each one reports an error.
func TestFails(t *testing.T) {
	t.Parallel()
	subjects := []struct {
		intType uint8
		bytes   []byte
	}{
		// Valid uint64 encoding, too large for uint32 and below.
		{32, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}},
		// Overflows even uint64.
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x02}},
		// Truncated: continuation bit set but no further bytes.
		{64, []byte{0xFF}},
	}
	for _, subject := range subjects {
		if subject.intType == 64 {
			if _, _, err := Unpack64(subject.bytes); err == nil {
				t.Errorf("Unpack64 %d: expected error while unpacking.", subject.bytes)
			}
		}
		if _, _, err := Unpack32(subject.bytes); err == nil {
			t.Errorf("Unpack32 %d: expected error while unpacking.", subject.bytes)
		}
		if _, _, err := Unpack16(subject.bytes); err == nil {
			t.Errorf("Unpack16 %d: expected error while unpacking.", subject.bytes)
		}
		if _, _, err := Unpack8(subject.bytes); err == nil {
			t.Errorf("Unpack8 %d: expected error while unpacking.", subject.bytes)
		}
	}
}