Initial commit
Proof-of-concept implementation. Bugs will occur.

.gitignore (vendored, Normal file, 1 line added)
@@ -0,0 +1 @@
faketunes.yaml

README.md (Normal file, 43 lines added)
@@ -0,0 +1,43 @@
# faketunes

A proof-of-concept virtual store that exposes ALAC music files as a FUSE filesystem.

## What it does

Let's assume you have a music library of FLACs with different sample rates and bitrates. The folder has the following structure:

```
/library/Artist/Album/01 - Track Name.flac
```

(This is the default music library structure for [beets](https://beets.io/).)

Let's also assume you have an iPod Classic with the original firmware and you want to sync your library to it. You don't want to convert your huge library into another directory just for iTunes to consume, doubling the disk space your collection takes and duplicating files.

This is where `faketunes` comes in handy. It creates a virtual FUSE filesystem that presents all your FLACs as ALACs in a single folder called `Music` inside a path you choose. You can then export that folder as a network share and point iTunes on Mac or Windows at it as the music library folder. After that, you can add all the files to the iTunes library and, finally, sync your iPod Classic.
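
For example, with the library above, a source track shows up under the mount point roughly like this (an illustrative mapping; `<destination>` stands for whatever destination path you configured):

```
/library/Artist/Album/01 - Track Name.flac  ->  <destination>/Music/Artist/Album/01 - Track Name.m4a
```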

There are no actual ALAC files in the `Music` directory: they are all virtual. On the first attempt to access a file, `faketunes` uses `ffmpeg` to generate an ALAC file with proper metadata (taken from your source FLAC) and album art, places it in the cache, and serves it. You can tune the cache size in the config (see below). All subsequent reads of the file are served from the cache as long as the converted file is present; otherwise, `ffmpeg` is run again.
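
The transcoder domain itself is not part of the excerpt shown below, but conceptually the conversion is a single `ffmpeg` invocation. A minimal sketch (hypothetical package and function names, and assumed flags rather than the project's exact ones):

```go
package transcode

import (
	"context"
	"os/exec"
)

// TranscodeToALAC sketches the FLAC -> ALAC conversion step: it shells out to
// ffmpeg, keeps the source tags and embedded cover art, and writes the result
// into the cache directory.
func TranscodeToALAC(ctx context.Context, sourcePath, cachePath string) error {
	cmd := exec.CommandContext(ctx, "ffmpeg",
		"-i", sourcePath, // source FLAC
		"-map_metadata", "0", // copy tags from the source file
		"-c:a", "alac", // encode the audio stream as Apple Lossless
		"-c:v", "copy", // pass embedded album art through unchanged
		"-y", cachePath, // overwrite a stale cache entry if one exists
	)

	return cmd.Run()
}
```

The real transcoder additionally reports the size of the produced file back to the cacher: the cacher code below calls `Convert(sourcePath, cacheFilePath)` and expects the output size in return.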

The goals of the project:

- Make a virtual filesystem that serves files to iTunes, so the FLAC library does not have to be converted separately for the iPod Classic.
- Track the original library and make sure there are no missing unconverted files.
- Reduce hard drive space usage (HDDs are not cheap anymore because of AI...).
- Be as fast and reliable as possible.
- Learn something new about file systems in general.

## Status of the project

Currently, the project is in _alpha_ state. It (somewhat) works and serves as proof that building such a virtual filesystem is possible in the first place. Optimizations and patches are welcome.

## Requirements

- Linux host (a Docker image will come later).
- FUSE support (and the `fusermount3` command present in `$PATH`).
- `ffmpeg` and `ffprobe` installed on the system.

## Configuration

By default, `faketunes` looks for its config at `/etc/faketunes.yaml`. You can override the config path by setting the `FAKETUNES_CONFIG` environment variable to the desired path.

See the `faketunes.example.yaml` file in the repo for an example configuration.

cmd/faketunes/faketunes.go (Normal file, 64 lines added)
@@ -0,0 +1,64 @@
package main

import (
    "context"
    "os"
    "os/signal"
    "syscall"

    "source.hodakov.me/hdkv/faketunes/internal/application"
    "source.hodakov.me/hdkv/faketunes/internal/domains"
    "source.hodakov.me/hdkv/faketunes/internal/domains/cacher"
    "source.hodakov.me/hdkv/faketunes/internal/domains/filesystem"
    "source.hodakov.me/hdkv/faketunes/internal/domains/transcoder"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())

    app := application.New(ctx)

    app.Logger().Info("Starting faketunes...")

    err := app.InitConfig()
    if err != nil {
        app.Logger().Fatal(err)
    }

    app.InitLogger()

    app.RegisterDomain(domains.FilesystemName, filesystem.New(app))
    app.RegisterDomain(domains.CacherName, cacher.New(app))
    app.RegisterDomain(domains.TranscoderName, transcoder.New(app))

    err = app.ConnectDependencies()
    if err != nil {
        app.Logger().Fatal(err)
    }

    err = app.StartDomains()
    if err != nil {
        app.Logger().Fatal(err)
    }

    app.Logger().Info("Started faketunes")

    // CTRL+C handler.
    interrupt := make(chan os.Signal, 1)
    signal.Notify(interrupt)
    shutdownDone := make(chan bool, 1)
    go func() {
        signalThing := <-interrupt
        if signalThing == syscall.SIGTERM || signalThing == syscall.SIGINT {
            app.Logger().WithField("signal", signalThing.String()).
                Info("Got terminating signal, shutting down...")

            cancel()

            shutdownDone <- true
        }
    }()

    <-shutdownDone
    os.Exit(0)
}

faketunes.example.yaml (Normal file, 10 lines added)
@@ -0,0 +1,10 @@
paths:
  source: ./src        # Source directory with the FLAC music library
  destination: ./dest/ # Destination directory for the virtual FS:
                       # the ALACs will appear in the "./Music" directory inside it
faketunes:
  log_level: debug     # Log level
  cache_size: 8192     # Cache size in megabytes

transcoding:
  parallel: 4          # Maximum number of parallel transcodes

go.mod (Normal file, 11 lines added)
@@ -0,0 +1,11 @@
module source.hodakov.me/hdkv/faketunes

go 1.25.7

require (
    github.com/goccy/go-yaml v1.19.2
    github.com/hanwen/go-fuse/v2 v2.9.0
    github.com/sirupsen/logrus v1.9.4
)

require golang.org/x/sys v0.28.0 // indirect

go.sum (Normal file, 20 lines added)
@@ -0,0 +1,20 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM=
github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/hanwen/go-fuse/v2 v2.9.0 h1:0AOGUkHtbOVeyGLr0tXupiid1Vg7QB7M6YUcdmVdC58=
github.com/hanwen/go-fuse/v2 v2.9.0/go.mod h1:yE6D2PqWwm3CbYRxFXV9xUd8Md5d6NG0WBs5spCswmI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

internal/application/application.go (Normal file, 118 lines added)
@@ -0,0 +1,118 @@
package application

import (
    "context"
    "fmt"
    "runtime"
    "sync"

    "github.com/sirupsen/logrus"
    "source.hodakov.me/hdkv/faketunes/internal/configuration"
    "source.hodakov.me/hdkv/faketunes/internal/domains"
)

type App struct {
    ctx    context.Context
    logger *logrus.Entry
    config *configuration.Config

    domains      map[string]domains.Domain
    domainsMutex sync.RWMutex
}

func (a *App) Config() *configuration.Config {
    return a.config
}

func (a *App) Context() context.Context {
    return a.ctx
}

func (a *App) Logger() *logrus.Entry {
    return a.logger
}

func New(ctx context.Context) *App {
    var m runtime.MemStats
    runtime.ReadMemStats(&m)

    app := new(App)

    // Initialize standard logger with memory stats and context attached permanently.
    logger := logrus.StandardLogger()

    logger.SetFormatter(&logrus.TextFormatter{
        FullTimestamp: true,
    })

    app.logger = logger.WithContext(ctx).WithFields(logrus.Fields{
        "memalloc": fmt.Sprintf("%dMB", m.Alloc/1024/1024),
        "memsys":   fmt.Sprintf("%dMB", m.Sys/1024/1024),
        "numgc":    fmt.Sprintf("%d", m.NumGC),
    })

    app.ctx = ctx

    app.domains = make(map[string]domains.Domain)

    return app
}

func (a *App) InitConfig() error {
    config, err := configuration.New()
    if err != nil {
        return fmt.Errorf("%w: %w (%w)", ErrApplication, ErrConfigInitializationError, err)
    }

    a.config = config

    return nil
}

func (a *App) InitLogger() {
    a.logger.Logger.SetLevel(a.config.FakeTunes.LogLevel)

    a.logger.WithField("log level", a.config.FakeTunes.LogLevel).Debug("Set log level")
}

func (a *App) RegisterDomain(name string, implementation domains.Domain) {
    a.domainsMutex.Lock()
    defer a.domainsMutex.Unlock()

    a.domains[name] = implementation
}

func (a *App) RetrieveDomain(name string) any {
    a.domainsMutex.RLock()
    defer a.domainsMutex.RUnlock()

    return a.domains[name]
}

func (a *App) ConnectDependencies() error {
    a.domainsMutex.RLock()
    defer a.domainsMutex.RUnlock()

    for _, domain := range a.domains {
        err := domain.ConnectDependencies()
        if err != nil {
            return fmt.Errorf("%w: %w (%w)", ErrApplication, ErrConnectDependencies, err)
        }
    }

    return nil
}

func (a *App) StartDomains() error {
    a.domainsMutex.RLock()
    defer a.domainsMutex.RUnlock()

    for _, domain := range a.domains {
        err := domain.Start()
        if err != nil {
            return fmt.Errorf("%w: %w (%w)", ErrApplication, ErrDomainInit, err)
        }
    }

    return nil
}

internal/application/errors.go (Normal file, 10 lines added)
@@ -0,0 +1,10 @@
package application

import "errors"

var (
    ErrApplication               = errors.New("application")
    ErrConfigInitializationError = errors.New("config initialization error")
    ErrConnectDependencies       = errors.New("failed to connect dependencies")
    ErrDomainInit                = errors.New("failed to initialize domains")
)

internal/configuration/config.go (Normal file, 49 lines added)
@@ -0,0 +1,49 @@
package configuration

import (
    "fmt"
    "os"

    "github.com/goccy/go-yaml"
    "github.com/sirupsen/logrus"
)

type Config struct {
    Paths       Paths       `yaml:"paths"`
    FakeTunes   FakeTunes   `yaml:"faketunes"`
    Transcoding Transcoding `yaml:"transcoding"`
}

type FakeTunes struct {
    CacheSize int64        `yaml:"cache_size"`
    LogLevel  logrus.Level `yaml:"log_level"`
}

type Paths struct {
    Source      string `yaml:"source"`
    Destination string `yaml:"destination"`
}

type Transcoding struct {
    Parallel int64 `yaml:"parallel"`
}

func New() (*Config, error) {
    fakeTunesCfgPath := "/etc/faketunes.yaml"
    if customPath, ok := os.LookupEnv("FAKETUNES_CONFIG"); ok {
        fakeTunesCfgPath = customPath
    }

    rawConfig, err := os.ReadFile(fakeTunesCfgPath)
    if err != nil {
        return nil, fmt.Errorf("%w: %w (%w)", ErrConfiguration, ErrCantReadConfigFile, err)
    }

    config := new(Config)
    err = yaml.Unmarshal(rawConfig, config)
    if err != nil {
        return nil, fmt.Errorf("%w: %w (%w)", ErrConfiguration, ErrCantParseConfigFile, err)
    }

    return config, nil
}

internal/configuration/errors.go (Normal file, 10 lines added)
@@ -0,0 +1,10 @@
package configuration

import "errors"

var (
    ErrConfiguration               = errors.New("configuration")
    ErrCantReadConfigFile          = errors.New("can't read config file")
    ErrCantParseConfigFile         = errors.New("can't parse config file")
    ErrSourceDirectoryDoesNotExist = errors.New("source directory does not exist")
)

internal/domains/cacher.go (Normal file, 10 lines added)
@@ -0,0 +1,10 @@
package domains

import "source.hodakov.me/hdkv/faketunes/internal/domains/cacher/dto"

const CacherName = "cacher"

type Cacher interface {
    GetStat(sourcePath string) (int64, error)
    GetFileDTO(sourcePath string) (*dto.CacheItem, error)
}

internal/domains/cacher/cacher.go (Normal file, 56 lines added)
@@ -0,0 +1,56 @@
package cacher

import (
    "fmt"
    "sync"

    "source.hodakov.me/hdkv/faketunes/internal/application"
    "source.hodakov.me/hdkv/faketunes/internal/domains"
    "source.hodakov.me/hdkv/faketunes/internal/domains/cacher/models"
)

var (
    _ domains.Cacher = new(Cacher)
    _ domains.Domain = new(Cacher)
)

type Cacher struct {
    app *application.App

    transcoder domains.Transcoder

    cacheDir    string
    cacheMutex  sync.RWMutex
    currentSize int64
    maxSize     int64
    items       map[string]*models.CacheItem
    stat        map[string]*models.CacherStat
}

func New(app *application.App) *Cacher {
    return &Cacher{
        app:      app,
        cacheDir: app.Config().Paths.Destination + "/.cache",
        maxSize:  app.Config().FakeTunes.CacheSize * 1024 * 1024,
        items:    make(map[string]*models.CacheItem),
        stat:     make(map[string]*models.CacherStat),
    }
}

func (c *Cacher) ConnectDependencies() error {
    transcoder, ok := c.app.RetrieveDomain(domains.TranscoderName).(domains.Transcoder)
    if !ok {
        return fmt.Errorf(
            "%w: %w (%s)", ErrCacher, ErrConnectDependencies,
            "transcoder domain interface conversion failed",
        )
    }

    c.transcoder = transcoder

    return nil
}

func (c *Cacher) Start() error {
    return nil
}

internal/domains/cacher/cleanup.go (Normal file, 37 lines added)
@@ -0,0 +1,37 @@
package cacher

import (
    "fmt"
    "os"
    "time"
)

// cleanup evicts the least recently used cache entries until the cache fits
// under maxSize again. The caller must hold cacheMutex.
func (c *Cacher) cleanup() error {
    for c.currentSize > c.maxSize && len(c.items) > 0 {
        var (
            itemKey    string
            itemSize   int64
            oldestTime time.Time
        )

        for key, item := range c.items {
            if itemKey == "" || item.Updated.Before(oldestTime) {
                itemKey = key
                oldestTime = item.Updated
                itemSize = item.Size
            }
        }

        if itemKey != "" {
            err := os.Remove(c.items[itemKey].Path)
            if err != nil {
                return fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToDeleteCachedFile, err)
            }

            delete(c.items, itemKey)
            c.currentSize -= itemSize
        }
    }

    return nil
}

internal/domains/cacher/dto/cache_item.go (Normal file, 9 lines added)
@@ -0,0 +1,9 @@
package dto

import "time"

type CacheItem struct {
    Path    string
    Size    int64
    Updated time.Time
}

internal/domains/cacher/errors.go (Normal file, 11 lines added)
@@ -0,0 +1,11 @@
package cacher

import "errors"

var (
    ErrCacher                   = errors.New("cacher")
    ErrConnectDependencies      = errors.New("failed to connect dependencies")
    ErrFailedToDeleteCachedFile = errors.New("failed to delete cached file")
    ErrFailedToGetSourceFile    = errors.New("failed to get source file")
    ErrFailedToTranscodeFile    = errors.New("failed to transcode file")
)

internal/domains/cacher/files.go (Normal file, 98 lines added)
@@ -0,0 +1,98 @@
package cacher

import (
    "crypto/md5"
    "fmt"
    "os"
    "path/filepath"
    "time"

    "source.hodakov.me/hdkv/faketunes/internal/domains/cacher/dto"
    "source.hodakov.me/hdkv/faketunes/internal/domains/cacher/models"
)

// GetFileDTO gets the ALAC file from cache or transcodes one with the transcoder if needed.
func (c *Cacher) GetFileDTO(sourcePath string) (*dto.CacheItem, error) {
    item, err := c.getFile(sourcePath)
    if err != nil {
        return nil, fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToGetSourceFile, err)
    }

    return models.CacheItemModelToDTO(item), nil
}

func (c *Cacher) getFile(sourcePath string) (*models.CacheItem, error) {
    sourceFileInfo, err := os.Stat(sourcePath)
    if err != nil {
        return nil, fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToGetSourceFile, err)
    }

    keyData := fmt.Sprintf("%s:%d", sourcePath, sourceFileInfo.ModTime().UnixNano())
    hash := md5.Sum([]byte(keyData))
    cacheKey := fmt.Sprintf("%x", hash)
    cacheFilePath := filepath.Join(c.cacheDir, cacheKey+".m4a")

    c.cacheMutex.Lock()
    defer c.cacheMutex.Unlock()

    // Check if file information exists in cache
    if item, ok := c.items[cacheKey]; ok {
        if _, err := os.Stat(item.Path); err == nil {
            // File exists in cache and on disk
            item.Updated = time.Now().UTC()

            c.updateCachedStat(sourcePath, item.Size)

            return item, nil
        }
    }

    // Check if file exists on disk but information about it doesn't exist in
    // the memory (for example, after application restart).
    if cachedFileInfo, err := os.Stat(cacheFilePath); err == nil {
        // Verify that the file on disk is newer than the source file and has content.
        // If that's the case, return the item information and store it in memory.
        if cachedFileInfo.ModTime().After(sourceFileInfo.ModTime()) &&
            cachedFileInfo.Size() > 1024 {
            item := &models.CacheItem{
                Path:    cacheFilePath,
                Size:    cachedFileInfo.Size(),
                Updated: time.Now().UTC(),
            }
            c.items[cacheKey] = item
            c.currentSize += cachedFileInfo.Size()

            c.updateCachedStat(sourcePath, item.Size)

            return item, nil
        }
    }

    // File does not exist on disk, need to transcode.
    // Register in the queue
    c.transcoder.QueueChannel() <- struct{}{}
    defer func() {
        <-c.transcoder.QueueChannel()
    }()

    // Convert file
    size, err := c.transcoder.Convert(sourcePath, cacheFilePath)
    if err != nil {
        return nil, fmt.Errorf("%w: %w (%w)", ErrCacher, ErrFailedToTranscodeFile, err)
    }

    // Add converted file information to cache
    item := &models.CacheItem{
        Path:    cacheFilePath,
        Size:    size,
        Updated: time.Now(),
    }
    c.items[cacheKey] = item
    c.currentSize += size

    c.updateCachedStat(sourcePath, size)

    // TODO: run cleanup on inotify events.
    c.cleanup()

    return item, nil
}

internal/domains/cacher/models/cache_item.go (Normal file, 21 lines added)
@@ -0,0 +1,21 @@
package models

import (
    "time"

    "source.hodakov.me/hdkv/faketunes/internal/domains/cacher/dto"
)

type CacheItem struct {
    Path    string
    Size    int64
    Updated time.Time
}

func CacheItemModelToDTO(item *CacheItem) *dto.CacheItem {
    return &dto.CacheItem{
        Path:    item.Path,
        Size:    item.Size,
        Updated: item.Updated,
    }
}

internal/domains/cacher/models/cacher_stat.go (Normal file, 9 lines added)
@@ -0,0 +1,9 @@
package models

import "time"

// CacherStat represents information about the size of a single object in the cache.
type CacherStat struct {
    Size    int64
    Created time.Time
}

internal/domains/cacher/stats.go (Normal file, 64 lines added)
@@ -0,0 +1,64 @@
package cacher

import (
    "crypto/md5"
    "fmt"
    "os"
    "path/filepath"
    "time"

    "source.hodakov.me/hdkv/faketunes/internal/domains/cacher/models"
)

// GetStat returns the file size without triggering conversion (for ls/stat).
func (c *Cacher) GetStat(sourcePath string) (int64, error) {
    // First check the stat cache
    if size, ok := c.getCachedStat(sourcePath); ok {
        return size, nil
    }

    // Check if we have a cached converted file
    info, err := os.Stat(sourcePath)
    if err != nil {
        return 0, err
    }

    keyData := fmt.Sprintf("%s:%d", sourcePath, info.ModTime().UnixNano())
    hash := md5.Sum([]byte(keyData))
    key := fmt.Sprintf("%x", hash)
    cachePath := filepath.Join(c.cacheDir, key+".m4a")

    // Check if the converted file exists and is valid
    if cacheInfo, err := os.Stat(cachePath); err == nil {
        if cacheInfo.ModTime().After(info.ModTime()) && cacheInfo.Size() > 1024 {
            c.cacheMutex.Lock()
            c.updateCachedStat(sourcePath, cacheInfo.Size())
            c.cacheMutex.Unlock()

            return cacheInfo.Size(), nil
        }
    }

    // Return estimated size (FLAC file size as placeholder)
    return info.Size(), nil
}

// updateCachedStat updates the stat cache. The caller must hold cacheMutex.
func (c *Cacher) updateCachedStat(sourcePath string, size int64) {
    c.stat[sourcePath] = &models.CacherStat{
        Size:    size,
        Created: time.Now(),
    }
}

// getCachedStat returns cached file stats.
func (c *Cacher) getCachedStat(sourcePath string) (int64, bool) {
    c.cacheMutex.RLock()
    defer c.cacheMutex.RUnlock()

    if stat, ok := c.stat[sourcePath]; ok {
        return stat.Size, true
    }
    return 0, false
}

internal/domains/domain.go (Normal file, 6 lines added)
@@ -0,0 +1,6 @@
package domains

type Domain interface {
    ConnectDependencies() error
    Start() error
}

internal/domains/filesystem.go (Normal file, 5 lines added)
@@ -0,0 +1,5 @@
package domains

const FilesystemName = "filesystem"

type Filesystem interface{}

internal/domains/filesystem/directories.go (Normal file, 53 lines added)
@@ -0,0 +1,53 @@
package filesystem

import (
    "fmt"
    "os"
    "os/exec"
    "time"

    "github.com/sirupsen/logrus"
)

func (f *FS) prepareDirectories() error {
    if _, err := os.Stat(f.sourceDir); os.IsNotExist(err) {
        return fmt.Errorf("%w: %w (%w)", ErrFilesystem, ErrNoSource, err)
    }

    f.app.Logger().WithField("path", f.sourceDir).Info("Got source directory")

    // Clean destination directory
    if _, err := os.Stat(f.destinationDir); err == nil {
        f.app.Logger().WithField("path", f.destinationDir).Info(
            "Cleaning up the destination mountpoint",
        )

        // Try to unmount the destination FS if it was mounted before.
        exec.Command("fusermount3", "-u", f.destinationDir).Run()
        time.Sleep(5 * time.Second)

        // Clean the destination
        err := os.RemoveAll(f.destinationDir)
        if err != nil {
            return fmt.Errorf("%w: %w (%w)", ErrFilesystem, ErrFailedToCleanupDestination, err)
        }
    }

    // Create the structure for the virtual filesystem.
    for _, dir := range []string{f.destinationDir, f.cacheDir, f.metadataDir} {
        if err := os.MkdirAll(dir, 0755); err != nil {
            f.app.Logger().WithField("path", dir).Error("Failed to create directory")

            return fmt.Errorf("%w: %w (%w)", ErrFilesystem, ErrFailedToCreateDestinationDirectory, err)
        }
    }

    f.app.Logger().WithFields(logrus.Fields{
        "source directory":         f.sourceDir,
        "virtual filesystem mount": f.destinationDir,
        "cache directory":          f.cacheDir,
        "metadata directory":       f.metadataDir,
    }).Debug("Filesystem directories prepared")

    return nil
}

internal/domains/filesystem/errors.go (Normal file, 12 lines added)
@@ -0,0 +1,12 @@
package filesystem

import "errors"

var (
    ErrFilesystem                         = errors.New("filesystem")
    ErrConnectDependencies                = errors.New("failed to connect dependencies")
    ErrFailedToPrepareDirectories         = errors.New("failed to prepare directories")
    ErrNoSource                           = errors.New("source does not exist")
    ErrFailedToCleanupDestination         = errors.New("failed to clean up destination directory")
    ErrFailedToCreateDestinationDirectory = errors.New("failed to create destination directory")
)

internal/domains/filesystem/file.go (Normal file, 75 lines added)
@@ -0,0 +1,75 @@
package filesystem

import (
    "context"
    "io"
    "os"
    "sync"
    "syscall"

    "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
)

type File struct {
    file      *os.File
    fileMutex sync.Mutex
}

var (
    _ = (fs.FileReader)((*File)(nil))
    _ = (fs.FileWriter)((*File)(nil))
    _ = (fs.FileFlusher)((*File)(nil))
    _ = (fs.FileReleaser)((*File)(nil))
)

func (fi *File) Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
    fi.fileMutex.Lock()
    defer fi.fileMutex.Unlock()

    _, err := fi.file.Seek(off, io.SeekStart)
    if err != nil {
        return nil, syscall.EIO
    }

    n, err := fi.file.Read(dest)
    if err != nil && err != io.EOF {
        return nil, syscall.EIO
    }

    return fuse.ReadResultData(dest[:n]), 0
}

func (fi *File) Write(ctx context.Context, data []byte, off int64) (written uint32, errno syscall.Errno) {
    fi.fileMutex.Lock()
    defer fi.fileMutex.Unlock()

    n, err := fi.file.WriteAt(data, off)
    if err != nil {
        return 0, syscall.EIO
    }

    return uint32(n), 0
}

func (fi *File) Flush(ctx context.Context) syscall.Errno {
    fi.fileMutex.Lock()
    defer fi.fileMutex.Unlock()

    if err := fi.file.Sync(); err != nil {
        return syscall.EIO
    }

    return 0
}

func (fi *File) Release(ctx context.Context) syscall.Errno {
    fi.fileMutex.Lock()
    defer fi.fileMutex.Unlock()

    if err := fi.file.Close(); err != nil {
        return syscall.EIO
    }

    return 0
}

internal/domains/filesystem/filesystem.go (Normal file, 66 lines added)
@@ -0,0 +1,66 @@
package filesystem

import (
    "fmt"

    "source.hodakov.me/hdkv/faketunes/internal/application"
    "source.hodakov.me/hdkv/faketunes/internal/domains"
)

var (
    _ domains.Filesystem = new(FS)
    _ domains.Domain     = new(FS)
)

type FS struct {
    app *application.App

    cacher domains.Cacher

    sourceDir      string
    destinationDir string
    cacheDir       string
    metadataDir    string

    inodeCounter uint64
}

func New(app *application.App) *FS {
    return &FS{
        app: app,

        sourceDir:      app.Config().Paths.Source,
        destinationDir: app.Config().Paths.Destination + "/Music",
        cacheDir:       app.Config().Paths.Destination + "/.cache",
        metadataDir:    app.Config().Paths.Destination + "/.metadata",

        inodeCounter: 1000, // Start counting inodes after the reserved ones
    }
}

func (f *FS) ConnectDependencies() error {
    cacher, ok := f.app.RetrieveDomain(domains.CacherName).(domains.Cacher)
    if !ok {
        return fmt.Errorf(
            "%w: %w (%s)", ErrFilesystem, ErrConnectDependencies,
            "cacher domain interface conversion failed",
        )
    }

    f.cacher = cacher

    return nil
}

func (f *FS) Start() error {
    err := f.prepareDirectories()
    if err != nil {
        return fmt.Errorf("%w: %w (%w)", ErrFilesystem, ErrFailedToPrepareDirectories, err)
    }

    go func() {
        f.mount()
    }()

    return nil
}

internal/domains/filesystem/inode.go (Normal file, 7 lines added)
@@ -0,0 +1,7 @@
package filesystem

import "sync/atomic"

func (f *FS) nextInode() uint64 {
    return atomic.AddUint64(&f.inodeCounter, 1)
}

internal/domains/filesystem/mount.go (Normal file, 52 lines added)
@@ -0,0 +1,52 @@
package filesystem

import (
    "log"
    "os"

    "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
    "github.com/sirupsen/logrus"
)

func (f *FS) mount() {
    rootDir := f.NewRootDirectory()

    // Populate mount options
    opts := &fs.Options{
        MountOptions: fuse.MountOptions{
            Name:          "faketunes",
            FsName:        "faketunes",
            DisableXAttrs: false, // Enable xattr support for macOS
            Debug:         false,
            // AllowOther: true,
            Options: []string{
                "default_permissions",
                "fsname=flac2alac",
                "nosuid",
                "nodev",
                "noexec",
                "ro",
            },
        },
        NullPermissions: false,
        Logger:          log.New(os.Stdout, "FUSE: ", log.LstdFlags),
    }

    // Redirect FUSE logs to logrus
    log.SetOutput(f.app.Logger().WithField("fuse debug logs", true).WriterLevel(logrus.DebugLevel))

    // Do an actual mount
    server, err := fs.Mount(f.destinationDir, rootDir, opts)
    if err != nil {
        f.app.Logger().WithError(err).Fatal("Failed to start filesystem")
    }
    defer server.Unmount()

    select {
    case <-f.app.Context().Done():
        return
    default:
        server.Wait()
    }
}

internal/domains/filesystem/music_app_metadata.go (Normal file, 158 lines added)
@@ -0,0 +1,158 @@
package filesystem

import (
    "context"
    "os"
    "strings"
    "syscall"
    "time"

    "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
)

type MusicAppMetadataFile struct {
    fs.Inode

    f    *FS
    path string
}

var (
    _ = (fs.NodeGetattrer)((*MusicAppMetadataFile)(nil))
    _ = (fs.NodeOpener)((*MusicAppMetadataFile)(nil))
    _ = (fs.NodeCreater)((*MusicAppMetadataFile)(nil))
    _ = (fs.NodeWriter)((*MusicAppMetadataFile)(nil))
    _ = (fs.NodeSetattrer)((*MusicAppMetadataFile)(nil))
    _ = (fs.NodeUnlinker)((*MusicAppMetadataFile)(nil))
)

func (m *MusicAppMetadataFile) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
    info, err := os.Stat(m.path)
    if err != nil {
        out.Mode = fuse.S_IFREG | 0644
        out.Nlink = 1
        out.Ino = m.StableAttr().Ino
        out.Size = 0
        out.Mtime = uint64(time.Now().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime
        out.Blocks = 1

        return 0
    }

    out.Mode = fuse.S_IFREG | uint32(info.Mode())
    out.Nlink = 1
    out.Ino = m.StableAttr().Ino
    out.Size = uint64(info.Size())
    out.Mtime = uint64(info.ModTime().Unix())
    out.Atime = out.Mtime
    out.Ctime = out.Mtime
    out.Blocks = (out.Size + 511) / 512

    return 0
}

func (m *MusicAppMetadataFile) Setattr(ctx context.Context, fh fs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno {
    info, err := os.Stat(m.path)
    if err != nil {
        out.Mode = fuse.S_IFREG | 0644
        out.Nlink = 1
        out.Ino = m.StableAttr().Ino
        out.Size = 0
        out.Mtime = uint64(time.Now().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime
        out.Blocks = 1
    } else {
        out.Mode = fuse.S_IFREG | uint32(info.Mode())
        out.Nlink = 1
        out.Ino = m.StableAttr().Ino
        out.Size = uint64(info.Size())
        out.Mtime = uint64(info.ModTime().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime
        out.Blocks = (out.Size + 511) / 512
    }

    return 0
}

func (m *MusicAppMetadataFile) Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (node *fs.Inode, fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
    file, err := os.Create(m.path)
    if err != nil {
        return nil, nil, 0, syscall.EIO
    }

    ch := m.NewInode(ctx, &MusicAppMetadataFile{f: m.f, path: m.path}, fs.StableAttr{
        Mode: fuse.S_IFREG,
        Ino:  m.f.nextInode(),
    })

    out.Mode = fuse.S_IFREG | 0644
    out.Nlink = 1
    out.Ino = ch.StableAttr().Ino
    out.Size = 0
    out.Mtime = uint64(time.Now().Unix())
    out.Atime = out.Mtime
    out.Ctime = out.Mtime
    out.Blocks = 1

    return ch, &File{file: file}, fuse.FOPEN_DIRECT_IO, 0
}

func (m *MusicAppMetadataFile) Open(ctx context.Context, flags uint32) (fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
    if _, err := os.Stat(m.path); os.IsNotExist(err) {
        if err := os.WriteFile(m.path, []byte{}, 0644); err != nil {
            return nil, 0, syscall.EIO
        }
    }

    file, err := os.OpenFile(m.path, int(flags), 0644)
    if err != nil {
        return nil, 0, syscall.EIO
    }

    return &File{file: file}, fuse.FOPEN_DIRECT_IO, 0
}

func (m *MusicAppMetadataFile) Write(ctx context.Context, fh fs.FileHandle, data []byte, off int64) (written uint32, errno syscall.Errno) {
    handle, ok := fh.(*File)
    if !ok {
        return 0, syscall.EBADF
    }

    n, err := handle.file.WriteAt(data, off)
    if err != nil {
        return 0, syscall.EIO
    }

    return uint32(n), 0
}

func (m *MusicAppMetadataFile) Unlink(ctx context.Context, name string) syscall.Errno {
    if err := os.Remove(m.path); err != nil {
        return syscall.ENOENT
    }
    return 0
}

func (f *FS) isiTunesMetadata(name string) bool {
    name = strings.ToLower(name)

    return strings.HasPrefix(name, ".") ||
        strings.Contains(name, "albumart") ||
        strings.Contains(name, "folder") ||
        strings.Contains(name, "itunes") ||
        strings.HasSuffix(name, ".itl") ||
        strings.HasSuffix(name, ".xml") ||
        strings.HasSuffix(name, ".db")
}

func (f *FS) NewMusicAppMetadataFile(path string) *MusicAppMetadataFile {
    return &MusicAppMetadataFile{
        f:    f,
        path: path,
    }
}

internal/domains/filesystem/music_directory.go (Normal file, 287 lines added)
@@ -0,0 +1,287 @@
package filesystem

import (
    "context"
    "os"
    "path/filepath"
    "strings"
    "syscall"
    "time"

    "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
    "github.com/sirupsen/logrus"
)

// Any non-root directory is a MusicDir.

type MusicDir struct {
    fs.Inode

    f    *FS
    path string
}

var (
    _ = (fs.NodeGetattrer)((*MusicDir)(nil))
    _ = (fs.NodeLookuper)((*MusicDir)(nil))
    _ = (fs.NodeReaddirer)((*MusicDir)(nil))
    _ = (fs.NodeCreater)((*MusicDir)(nil))
    _ = (fs.NodeGetxattrer)((*MusicDir)(nil))
    _ = (fs.NodeSetxattrer)((*MusicDir)(nil))
    _ = (fs.NodeRemovexattrer)((*MusicDir)(nil))
    _ = (fs.NodeListxattrer)((*MusicDir)(nil))
)

func (d *MusicDir) Getattr(ctx context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
    out.Mode = fuse.S_IFDIR | 0755
    out.Nlink = 2 // Minimum . and ..
    out.Ino = d.StableAttr().Ino
    out.Size = 4096

    // Get actual mod time from filesystem if possible
    if info, err := os.Stat(d.path); err == nil {
        out.Mtime = uint64(info.ModTime().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime

        // Count actual subdirectories for accurate nlink
        if entries, err := os.ReadDir(d.path); err == nil {
            for _, entry := range entries {
                if entry.IsDir() {
                    out.Nlink++
                }
            }
        }
    } else {
        now := uint64(time.Now().Unix())
        out.Mtime = now
        out.Atime = now
        out.Ctime = now
    }

    out.Blocks = 1
    out.Blksize = 512

    return 0
}

func (d *MusicDir) Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno) {
    // Same implementation as RootDirectory
    switch attr {
    case "user.org.netatalk.Metadata", "com.apple.FinderInfo", "com.apple.ResourceFork":
        return 0, 0
    default:
        return 0, syscall.ENODATA
    }
}

func (d *MusicDir) Setxattr(ctx context.Context, attr string, data []byte, flags uint32) syscall.Errno {
    return 0
}

func (d *MusicDir) Removexattr(ctx context.Context, attr string) syscall.Errno {
    return 0
}

func (d *MusicDir) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) {
    return 0, 0
}

func (d *MusicDir) Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (*fs.Inode, fs.FileHandle, uint32, syscall.Errno) {
    if d.f.isiTunesMetadata(name) {
        metaPath := filepath.Join(d.f.metadataDir, name)

        file, err := os.Create(metaPath)
        if err != nil {
            return nil, nil, 0, syscall.EIO
        }

        ch := d.NewInode(
            ctx,
            d.f.NewMusicAppMetadataFile(metaPath),
            fs.StableAttr{
                Mode: fuse.S_IFREG,
                Ino:  d.f.nextInode(),
            },
        )

        out.Mode = fuse.S_IFREG | 0644
        out.Nlink = 1
        out.Ino = ch.StableAttr().Ino
        out.Size = 0
        out.Mtime = uint64(time.Now().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime
        out.Blocks = 1

        return ch, &File{file: file}, fuse.FOPEN_DIRECT_IO, 0
    }

    return nil, nil, 0, syscall.EPERM
}

func (d *MusicDir) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
    // Handle .m4a virtual files
    if strings.HasSuffix(strings.ToLower(name), ".m4a") {
        flacName := name[:len(name)-4] + ".flac"
        flacPath := filepath.Join(d.path, flacName)

        if _, err := os.Stat(flacPath); err == nil {
            ch := d.NewInode(
                ctx,
                d.f.NewMusicFile(flacPath, name, false),
                fs.StableAttr{
                    Mode: fuse.S_IFREG,
                    Ino:  d.f.nextInode(),
                },
            )

            out.Mode = fuse.S_IFREG | 0444
            out.Nlink = 1
            out.Ino = ch.StableAttr().Ino

            if size, err := d.f.cacher.GetStat(flacPath); err == nil {
                out.Size = uint64(size)
            } else {
                out.Size = 0
            }

            out.Mtime = uint64(time.Now().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
            out.Blocks = (out.Size + 511) / 512

            return ch, 0
        }
    }

    // Check real file or directory
    fullPath := filepath.Join(d.path, name)
    info, err := os.Stat(fullPath)
    if err != nil {
        return nil, syscall.ENOENT
    }

    if info.IsDir() {
        ch := d.NewInode(
            ctx, d.f.NewMusicDirectory(fullPath),
            fs.StableAttr{
                Mode: fuse.S_IFDIR,
                Ino:  d.f.nextInode(),
            },
        )

        out.Mode = fuse.S_IFDIR | 0755
        out.Nlink = 2
        out.Ino = ch.StableAttr().Ino
        out.Size = 4096
        out.Mtime = uint64(info.ModTime().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime
        out.Blocks = 1

        return ch, 0
    }

    // Regular file (non-FLAC)
    isMeta := d.f.isiTunesMetadata(name)
    ch := d.NewInode(ctx, d.f.NewMusicFile(fullPath, name, isMeta),
        fs.StableAttr{
            Mode: fuse.S_IFREG,
            Ino:  d.f.nextInode(),
        },
    )

    if isMeta {
        out.Mode = fuse.S_IFREG | 0644
    } else {
        out.Mode = fuse.S_IFREG | 0444
    }

    out.Nlink = 1
    out.Ino = ch.StableAttr().Ino
    out.Size = uint64(info.Size())
    out.Mtime = uint64(info.ModTime().Unix())
    out.Atime = out.Mtime
    out.Ctime = out.Mtime
    out.Blocks = (out.Size + 511) / 512

    return ch, 0
}

func (d *MusicDir) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) {
    d.f.app.Logger().WithField("path", d.path).Debug("Readdir called on directory")

    var dirEntries []fuse.DirEntry

    dirEntries = append(dirEntries, fuse.DirEntry{
        Name: ".",
        Mode: fuse.S_IFDIR | 0755,
        Ino:  d.StableAttr().Ino,
    })
    dirEntries = append(dirEntries, fuse.DirEntry{
        Name: "..",
        Mode: fuse.S_IFDIR | 0755,
        Ino:  1, // Parent (root) inode
    })

    entries, err := os.ReadDir(d.path)
    if err != nil {
        d.f.app.Logger().WithError(err).WithField("path", d.path).Error(
            "Error reading directory",
        )

        return fs.NewListDirStream(dirEntries), 0
    }

    for _, entry := range entries {
        name := entry.Name()

        if strings.HasPrefix(name, ".") && !d.f.isiTunesMetadata(name) {
            continue
        }

        mode := fuse.S_IFREG | 0444
        if entry.IsDir() {
            mode = fuse.S_IFDIR | 0755
        }

        // Convert .flac to .m4a in directory listing
        if strings.HasSuffix(strings.ToLower(name), ".flac") {
            name = name[:len(name)-5] + ".m4a"
            if !d.f.isiTunesMetadata(name) {
                mode = fuse.S_IFREG | 0644
            }
        } else if !entry.IsDir() && !d.f.isiTunesMetadata(name) {
            mode = fuse.S_IFREG | 0644
        }

        dirEntries = append(dirEntries, fuse.DirEntry{
            Name: name,
            Mode: uint32(mode),
            Ino:  d.f.nextInode(),
        })
    }

    d.f.app.Logger().WithFields(logrus.Fields{
        "path":              d.path,
        "directory entries": len(dirEntries),
    }).Debug("Returning directory entries")

    return fs.NewListDirStream(dirEntries), 0
}

func (f *FS) NewMusicDirectory(path string) *MusicDir {
    return &MusicDir{
        f:    f,
        path: path,
    }
}

internal/domains/filesystem/music_file.go (Normal file, 146 lines added)
@@ -0,0 +1,146 @@
package filesystem

import (
    "context"
    "log"
    "os"
    "path/filepath"
    "syscall"
    "time"

    "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
)

type MusicFile struct {
    fs.Inode

    f           *FS
    sourcePath  string
    virtualName string
    isMetaFile  bool
}

var (
    _ = (fs.NodeGetattrer)((*MusicFile)(nil))
    _ = (fs.NodeOpener)((*MusicFile)(nil))
    _ = (fs.NodeSetattrer)((*MusicFile)(nil))
)

func (f *MusicFile) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
    if f.isMetaFile {
        metaPath := filepath.Join(f.f.metadataDir, f.virtualName)

        if info, err := os.Stat(metaPath); err == nil {
            out.Mode = fuse.S_IFREG | 0644
            out.Nlink = 1
            out.Ino = f.StableAttr().Ino
            out.Size = uint64(info.Size())
            out.Mtime = uint64(info.ModTime().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
            out.Blocks = (out.Size + 511) / 512
        } else {
            out.Mode = fuse.S_IFREG | 0644
            out.Nlink = 1
            out.Ino = f.StableAttr().Ino
            out.Size = 0
            out.Mtime = uint64(time.Now().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
            out.Blocks = 1
        }

        return 0
    }

    out.Mode = fuse.S_IFREG | 0444
    out.Nlink = 1
    out.Ino = f.StableAttr().Ino
    out.Blocks = 1

    if size, err := f.f.cacher.GetStat(f.sourcePath); err == nil {
        out.Size = uint64(size)
        out.Blocks = (out.Size + 511) / 512
    } else {
        out.Size = 0
    }

    out.Mtime = uint64(time.Now().Unix())
    out.Atime = out.Mtime
    out.Ctime = out.Mtime

    return 0
}

func (f *MusicFile) Setattr(ctx context.Context, fh fs.FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno {
    if f.isMetaFile {
        metaPath := filepath.Join(f.f.metadataDir, f.virtualName)
        if info, err := os.Stat(metaPath); err == nil {
            out.Mode = fuse.S_IFREG | 0644
            out.Nlink = 1
            out.Ino = f.StableAttr().Ino
            out.Size = uint64(info.Size())
            out.Mtime = uint64(info.ModTime().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
            out.Blocks = (out.Size + 511) / 512
        } else {
            out.Mode = fuse.S_IFREG | 0644
            out.Nlink = 1
            out.Ino = f.StableAttr().Ino
            out.Size = 0
            out.Mtime = uint64(time.Now().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
            out.Blocks = 1
        }

        return 0
    }

    return syscall.EPERM
}

func (f *MusicFile) Open(ctx context.Context, flags uint32) (fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
    if f.isMetaFile {
        metaPath := filepath.Join(f.f.metadataDir, f.virtualName)

        file, err := os.OpenFile(metaPath, int(flags), 0644)
        if err != nil && os.IsNotExist(err) {
            file, err = os.Create(metaPath)
        }
        if err != nil {
            return nil, 0, syscall.EIO
        }

        return &File{file: file}, fuse.FOPEN_DIRECT_IO, 0
    }

    if flags&fuse.O_ANYWRITE != 0 {
        return nil, 0, syscall.EPERM
    }

    entry, err := f.f.cacher.GetFileDTO(f.sourcePath)
    if err != nil {
        log.Printf("Failed to convert %s to ALAC: %v", filepath.Base(f.sourcePath), err)

        return nil, 0, syscall.EIO
    }

    file, err := os.Open(entry.Path)
    if err != nil {
        return nil, 0, syscall.EIO
    }

    return &File{file: file}, fuse.FOPEN_KEEP_CACHE, 0
}

func (f *FS) NewMusicFile(sourcePath, virtualName string, isMetaFile bool) *MusicFile {
    return &MusicFile{
        f:           f,
        sourcePath:  sourcePath,
        virtualName: virtualName,
        isMetaFile:  isMetaFile,
    }
}

323
internal/domains/filesystem/root.go
Normal file
@@ -0,0 +1,323 @@
package filesystem

import (
    "context"
    "os"
    "path/filepath"
    "strings"
    "syscall"
    "time"

    "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
    "github.com/sirupsen/logrus"
)

type RootDirectory struct {
    fs.Inode

    f *FS
}

var (
    _ = (fs.NodeGetattrer)((*RootDirectory)(nil))
    _ = (fs.NodeLookuper)((*RootDirectory)(nil))
    _ = (fs.NodeReaddirer)((*RootDirectory)(nil))
    _ = (fs.NodeCreater)((*RootDirectory)(nil))
    _ = (fs.NodeGetxattrer)((*RootDirectory)(nil))
    _ = (fs.NodeSetxattrer)((*RootDirectory)(nil))
    _ = (fs.NodeRemovexattrer)((*RootDirectory)(nil))
    _ = (fs.NodeListxattrer)((*RootDirectory)(nil))
)

func (r *RootDirectory) Create(
    ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut,
) (*fs.Inode, fs.FileHandle, uint32, syscall.Errno) {
    if r.f.isiTunesMetadata(name) {
        metaPath := filepath.Join(r.f.metadataDir, name)

        file, err := os.Create(metaPath)
        if err != nil {
            return nil, nil, 0, syscall.EIO
        }

        ch := r.NewInode(
            ctx,
            r.f.NewMusicAppMetadataFile(metaPath),
            fs.StableAttr{
                Mode: fuse.S_IFREG,
                Ino:  r.f.nextInode(),
            },
        )

        out.Mode = fuse.S_IFREG | 0644
        out.Nlink = 1
        out.Ino = ch.StableAttr().Ino
        out.Size = 0
        out.Mtime = uint64(time.Now().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime
        out.Blocks = 1

        return ch, &File{file: file}, fuse.FOPEN_DIRECT_IO, 0
    }

    return nil, nil, 0, syscall.EPERM
}

func (r *RootDirectory) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
    if r.f.isiTunesMetadata(name) {
        metaPath := filepath.Join(r.f.metadataDir, name)
        ch := r.NewInode(
            ctx,
            r.f.NewMusicAppMetadataFile(metaPath),
            fs.StableAttr{
                Mode: fuse.S_IFREG,
                Ino:  r.f.nextInode(),
            },
        )

        out.Mode = fuse.S_IFREG | 0644
        out.Nlink = 1
        out.Ino = ch.StableAttr().Ino

        if info, err := os.Stat(metaPath); err == nil {
            out.Size = uint64(info.Size())
            out.Mtime = uint64(info.ModTime().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
        } else {
            out.Size = 0
            out.Mtime = uint64(time.Now().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
        }

        // Calculate blocks
        out.Blocks = (out.Size + 511) / 512

        return ch, 0
    }

    // Handle .m4a virtual files
    if strings.HasSuffix(strings.ToLower(name), ".m4a") {
        flacName := name[:len(name)-4] + ".flac"
        flacPath := filepath.Join(r.f.sourceDir, flacName)

        if _, err := os.Stat(flacPath); err == nil {
            ch := r.NewInode(
                ctx,
                r.f.NewMusicFile(flacPath, name, false),
                fs.StableAttr{
                    Mode: fuse.S_IFREG,
                    Ino:  r.f.nextInode(),
                },
            )

            out.Mode = fuse.S_IFREG | 0444
            out.Nlink = 1
            out.Ino = ch.StableAttr().Ino

            if size, err := r.f.cacher.GetStat(flacPath); err == nil {
                out.Size = uint64(size)
            } else {
                out.Size = 0
            }

            out.Mtime = uint64(time.Now().Unix())
            out.Atime = out.Mtime
            out.Ctime = out.Mtime
            out.Blocks = (out.Size + 511) / 512

            return ch, 0
        }
    }

    // Check real file or directory
    fullPath := filepath.Join(r.f.sourceDir, name)
    info, err := os.Stat(fullPath)
    if err != nil {
        return nil, syscall.ENOENT
    }

    if info.IsDir() {
        ch := r.NewInode(ctx, r.f.NewMusicDirectory(fullPath), fs.StableAttr{
            Mode: fuse.S_IFDIR,
            Ino:  r.f.nextInode(),
        })

        out.Mode = fuse.S_IFDIR | 0755
        out.Nlink = 2 // Minimum . and ..
        out.Ino = ch.StableAttr().Ino
        out.Size = 4096
        out.Mtime = uint64(info.ModTime().Unix())
        out.Atime = out.Mtime
        out.Ctime = out.Mtime
        out.Blocks = 1

        return ch, 0
    }

    // Regular file (non-FLAC)
    isMeta := r.f.isiTunesMetadata(name)
    ch := r.NewInode(ctx, r.f.NewMusicFile(fullPath, name, isMeta), fs.StableAttr{
        Mode: fuse.S_IFREG,
        Ino:  r.f.nextInode(),
    })

    if isMeta {
        out.Mode = fuse.S_IFREG | 0644
    } else {
        out.Mode = fuse.S_IFREG | 0444
    }

    out.Nlink = 1
    out.Ino = ch.StableAttr().Ino
    out.Size = uint64(info.Size())
    out.Mtime = uint64(info.ModTime().Unix())
    out.Atime = out.Mtime
    out.Ctime = out.Mtime
    out.Blocks = (out.Size + 511) / 512

    return ch, 0
}

func (r *RootDirectory) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) {
    r.f.app.Logger().WithField("path", r.f.sourceDir).Debug("Readdir called on directory")

    var dirEntries []fuse.DirEntry

    // Always include . and .. first
    dirEntries = append(dirEntries, fuse.DirEntry{
        Name: ".",
        Mode: fuse.S_IFDIR | 0755,
        Ino:  1, // Root inode
    })
    dirEntries = append(dirEntries, fuse.DirEntry{
        Name: "..",
        Mode: fuse.S_IFDIR | 0755,
        Ino:  1,
    })

    // Read actual directory contents
    entries, err := os.ReadDir(r.f.sourceDir)
    if err != nil {
        r.f.app.Logger().WithError(err).WithField("path", r.f.sourceDir).Error(
            "Error reading directory",
        )
        return fs.NewListDirStream(dirEntries), 0
    }

    for _, entry := range entries {
        name := entry.Name()

        if strings.HasPrefix(name, ".") && !r.f.isiTunesMetadata(name) {
            continue
        }

        mode := fuse.S_IFREG | 0444
        if entry.IsDir() {
            mode = fuse.S_IFDIR | 0755
        }

        // Convert .flac to .m4a in directory listing
        if strings.HasSuffix(strings.ToLower(name), ".flac") {
            name = name[:len(name)-5] + ".m4a"
        }

        // Only regular files get the writable mode; guarding on IsDir keeps the
        // directory mode assigned above from being clobbered.
        if !entry.IsDir() {
            mode = fuse.S_IFREG | 0644
        }

        dirEntries = append(dirEntries, fuse.DirEntry{
            Name: name,
            Mode: uint32(mode),
            Ino:  r.f.nextInode(),
        })
    }

    r.f.app.Logger().WithFields(logrus.Fields{
        "path":              r.f.sourceDir,
        "directory entries": len(dirEntries),
    }).Debug("Returning directory entries")

    return fs.NewListDirStream(dirEntries), 0
}

func (r *RootDirectory) Getattr(
    ctx context.Context, f fs.FileHandle, out *fuse.AttrOut,
) syscall.Errno {
    // Set basic directory attributes
    out.Mode = fuse.S_IFDIR | 0755

    // Set nlink to at least 2 (for . and ..)
    out.Nlink = 2

    // Root directory typically has inode 1
    out.Ino = 1

    // Set size to typical directory size
    out.Size = 4096

    // Set timestamps
    now := uint64(time.Now().Unix())
    out.Mtime = now
    out.Atime = now
    out.Ctime = now

    // Set blocks (1 block of 512 bytes)
    out.Blocks = 1

    // Set block size
    out.Blksize = 512

    // Count actual subdirectories for accurate nlink
    if entries, err := os.ReadDir(r.f.sourceDir); err == nil {
        for _, entry := range entries {
            if entry.IsDir() {
                out.Nlink++
            }
        }
    }

    return 0
}

func (r *RootDirectory) Getxattr(
    ctx context.Context, attr string, dest []byte,
) (uint32, syscall.Errno) {
    // Handle common macOS/Netatalk xattrs
    switch attr {
    case "user.org.netatalk.Metadata":
        fallthrough
    case "com.apple.FinderInfo":
        fallthrough
    case "com.apple.ResourceFork":
        // Return empty data
        return 0, 0
    default:
        return 0, syscall.ENODATA
    }
}

func (r *RootDirectory) Setxattr(ctx context.Context, attr string, data []byte, flags uint32) syscall.Errno {
    // Silently accept xattr writes (ignore them)
    return 0
}

func (r *RootDirectory) Removexattr(ctx context.Context, attr string) syscall.Errno {
    return 0
}

func (r *RootDirectory) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) {
    // Return empty xattr list
    return 0, 0
}

func (f *FS) NewRootDirectory() *RootDirectory {
    return &RootDirectory{
        f: f,
    }
}
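The root directory above is what go-fuse ultimately serves once it is mounted. The mount wiring lives in `cmd/faketunes` and is not part of this hunk, so the sketch below is only a minimal illustration of how a root node like this can be attached with `fs.Mount`; the mount point, timeouts, and the empty-inode placeholder are illustrative, not the project's actual values.

```go
// A minimal mounting sketch, assuming a root node such as the value returned by
// FS.NewRootDirectory(); values here are illustrative.
package main

import (
    "log"
    "time"

    "github.com/hanwen/go-fuse/v2/fs"
    "github.com/hanwen/go-fuse/v2/fuse"
)

// mountVirtualLibrary mounts the given root at mountPoint and blocks until the
// filesystem is unmounted.
func mountVirtualLibrary(root fs.InodeEmbedder, mountPoint string) error {
    timeout := time.Second

    server, err := fs.Mount(mountPoint, root, &fs.Options{
        EntryTimeout: &timeout,
        AttrTimeout:  &timeout,
        MountOptions: fuse.MountOptions{
            FsName: "faketunes",
            Name:   "faketunes",
        },
    })
    if err != nil {
        return err
    }

    log.Printf("mounted virtual library at %s", mountPoint)
    server.Wait()

    return nil
}

func main() {
    // Illustrative only: in faketunes the root comes from the filesystem
    // domain; an empty inode stands in for it here.
    if err := mountVirtualLibrary(&fs.Inode{}, "/mnt/faketunes"); err != nil {
        log.Fatal(err)
    }
}
```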
8
internal/domains/transcoder.go
Normal file
@@ -0,0 +1,8 @@
package domains

const TranscoderName = "transcoder"

type Transcoder interface {
    Convert(sourcePath, destinationPath string) (int64, error)
    QueueChannel() chan struct{}
}
40
internal/domains/transcoder/album_art.go
Normal file
@@ -0,0 +1,40 @@
package transcoder

import (
    "os"
    "path/filepath"
)

func (t *Transcoder) findAlbumArt(path string) string {
    // Common album art filenames (in order of preference)
    artFiles := []string{
        "albumart.jpg",
        "AlbumArt.jpg",
        "cover.jpg",
        "Cover.jpg",
        "folder.jpg",
        "Folder.jpg",
        "albumart.jpeg",
        "cover.jpeg",
        "folder.jpeg",
        "albumart.png",
        "cover.png",
        "folder.png",
        "albumart.gif",
        "cover.gif",
        ".albumart.jpg",
        ".cover.jpg",
        "AlbumArtwork.jpg",
        "album.jpg",
        "Album.jpg",
    }

    for _, artFile := range artFiles {
        fullPath := filepath.Join(path, artFile)
        if _, err := os.Stat(fullPath); err == nil {
            return fullPath
        }
    }

    return ""
}
212
internal/domains/transcoder/convert.go
Normal file
@@ -0,0 +1,212 @@
package transcoder

import (
    "bytes"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/sirupsen/logrus"
)

const (
    defaultSampleRate = 48000
    defaultBitDepth   = 16
)

// Convert converts the file from FLAC to ALAC using ffmpeg.
// It embeds all required metadata and places the file in the desired destination.
// On success, it returns the transcoded file's size.
func (t *Transcoder) Convert(sourcePath, destinationPath string) (int64, error) {
    t.app.Logger().WithFields(logrus.Fields{
        "source file": sourcePath,
        "destination": destinationPath,
    }).Info("Transcoding file using ffmpeg...")

    sourceAlbumDir := filepath.Dir(sourcePath)
    albumArt := t.findAlbumArt(sourceAlbumDir)
    hasAlbumArt := albumArt != ""
    sortArtist := t.extractAlbumArtist(sourcePath, sourceAlbumDir)
    sampleRate := defaultSampleRate
    bitDepth := defaultBitDepth

    if hasAlbumArt {
        t.app.Logger().WithField("album art path", albumArt).Debug("Found album art")
    }

    t.app.Logger().WithField("sort artist", sortArtist).Debug(
        "Setting sorting artist for iTunes",
    )

    sourceAnalyzeCmd := exec.Command(
        "ffprobe",
        "-v", "quiet",
        "-show_streams",
        "-select_streams", "a:0",
        "-of", "csv=p=0",
        sourcePath,
    )

    analyzeOutput, err := sourceAnalyzeCmd.Output()
    if err == nil {
        // Investigate bit depth and sample rate from the ffprobe output.
        // We need that to make sure we don't oversample files that are lower
        // than the default sample rate and bit depth.
        lines := strings.Split(strings.TrimSpace(string(analyzeOutput)), "\n")
        for _, line := range lines {
            if strings.Contains(line, "audio") {
                parts := strings.Split(line, ",")
                if len(parts) >= 6 {
                    // Get sample rate
                    if sr, err := strconv.Atoi(parts[2]); err == nil && sr > 0 {
                        sampleRate = sr
                    }

                    // Get bit depth from sample_fmt or bits_per_raw_sample
                    sampleFmt := parts[4]
                    if strings.Contains(sampleFmt, "s32") || strings.Contains(sampleFmt, "flt") {
                        bitDepth = 32
                    } else if strings.Contains(sampleFmt, "s64") || strings.Contains(sampleFmt, "dbl") {
                        bitDepth = 64
                    } else if len(parts) >= 6 && parts[5] != "N/A" && parts[5] != "" {
                        if bd, err := strconv.Atoi(parts[5]); err == nil && bd > 0 {
                            bitDepth = bd
                        }
                    }
                }

                break // We only need the first audio stream
            }
        }
    }

    t.app.Logger().WithFields(logrus.Fields{
        "bit depth":   bitDepth,
        "sample rate": sampleRate,
    }).Info("Detected source file sample rate and bit depth")

    needsDownsample := sampleRate > defaultSampleRate
    needsBitReduce := bitDepth > defaultBitDepth

    if needsDownsample {
        t.app.Logger().WithFields(logrus.Fields{
            "new sample rate": defaultSampleRate,
            "old sample rate": sampleRate,
        }).Info("Sample rate of the destination file will be changed")
    }

    if needsBitReduce {
        t.app.Logger().WithFields(logrus.Fields{
            "new bit depth": defaultBitDepth,
            "old bit depth": bitDepth,
        }).Info("Bit depth of the destination file will be changed")
    }

    ffmpegArgs := make([]string, 0)

    // Add sources
    ffmpegArgs = append(ffmpegArgs, "-i", sourcePath)

    if hasAlbumArt {
        ffmpegArgs = append(ffmpegArgs, "-i", albumArt)
    }

    // Map streams and set codecs
    if hasAlbumArt {
        ffmpegArgs = append(ffmpegArgs,
            "-map", "0:a", // Map audio from first input
            "-map", "1", // Map image from second input
            "-c:a", "alac", // ALAC codec for audio
            "-c:v", "copy", // Copy image without re-encoding
            "-disposition:v", "attached_pic",
        )
    } else {
        ffmpegArgs = append(ffmpegArgs,
            "-map", "0:a",
            "-c:a", "alac",
        )
    }

    // Handle downsampling
    if needsDownsample {
        ffmpegArgs = append(
            ffmpegArgs,
            "-af", "aresample=48000:resampler=soxr:precision=28",
        )
    } else {
        ffmpegArgs = append(ffmpegArgs, "-ar", fmt.Sprintf("%d", sampleRate))
    }

    if needsBitReduce {
        // Reduce to 16-bit with good dithering
        ffmpegArgs = append(ffmpegArgs,
            "-sample_fmt", "s16p",
            "-dither_method", "triangular",
        )
    }

    // Handle metadata copying and sort_artist filling. The global flags go
    // before the output path because ffmpeg ignores options that trail the
    // output file.
    ffmpegArgs = append(ffmpegArgs,
        "-map_metadata", "0",
        "-metadata", fmt.Sprintf("sort_artist=%s", t.escapeMetadata(sortArtist)),
        "-write_id3v2", "1",
        "-id3v2_version", "3",
        "-y",
        "-loglevel", "error",
        "-stats",
        destinationPath,
    )

    t.app.Logger().WithField(
        "ffmpeg command", "ffmpeg "+strings.Join(ffmpegArgs, " "),
    ).Debug("FFMpeg parameters")

    ffmpeg := exec.Command("ffmpeg", ffmpegArgs...)
    var stderr bytes.Buffer
    ffmpeg.Stderr = &stderr

    if err := ffmpeg.Run(); err != nil {
        t.app.Logger().WithError(err).Error("Failed to invoke ffmpeg!")
        t.app.Logger().WithField("ffmpeg stderr", stderr.String()).Debug("Got ffmpeg stderr")

        return 0, fmt.Errorf("%w: %w (%w)", ErrTranscoder, ErrTranscodeError, err)
    }

    // Verify that the result file is saved to the cache directory
    transcodedFileStat, err := os.Stat(destinationPath)
    if err != nil {
        t.app.Logger().WithError(err).WithFields(logrus.Fields{
            "source file": sourcePath,
            "destination": destinationPath,
        }).Error("Transcoded file not found (transcode error?). Check the logs for details")

        return 0, fmt.Errorf("%w: %w (%w)", ErrTranscoder, ErrTranscodedFileNotFound, err)
    }

    // Discard the file if it's less than 1 kilobyte: it's probably a transcode
    // error
    if transcodedFileStat.Size() < 1024 {
        t.app.Logger().WithFields(logrus.Fields{
            "source file":          sourcePath,
            "destination":          destinationPath,
            "transcoded file size": transcodedFileStat.Size(),
        }).Error("Transcoded file is too small (transcode error?). Check the logs for details")

        return 0, fmt.Errorf(
            "%w: %w (%s)",
            ErrTranscoder, ErrTranscodedFileIsTooSmall,
            fmt.Sprintf("size is %d bytes, less than 1 kilobyte", transcodedFileStat.Size()),
        )
    }

    t.app.Logger().WithFields(logrus.Fields{
        "source file":      sourcePath,
        "destination":      destinationPath,
        "destination size": transcodedFileStat.Size(),
    }).Info("File transcoded successfully")

    return transcodedFileStat.Size(), nil
}
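To make the argument-building logic above concrete, here is roughly the argv it assembles for a hypothetical 24-bit, 96 kHz FLAC with a `cover.jpg` next to it. The paths, the cache destination, and the resolved sort artist are illustrative values, not output captured from the program.

```go
// Prints an approximation of the ffmpeg invocation Convert builds for a
// hypothetical high-resolution source with album art; all values illustrative.
package main

import (
    "fmt"
    "strings"
)

func main() {
    args := []string{
        "-i", "/library/Artist/Album/01 - Track Name.flac",
        "-i", "/library/Artist/Album/cover.jpg",
        "-map", "0:a",
        "-map", "1",
        "-c:a", "alac",
        "-c:v", "copy",
        "-disposition:v", "attached_pic",
        "-af", "aresample=48000:resampler=soxr:precision=28", // 96 kHz -> 48 kHz
        "-sample_fmt", "s16p", // 24-bit -> 16-bit
        "-dither_method", "triangular",
        "-map_metadata", "0",
        "-metadata", "sort_artist=Artist",
        "-write_id3v2", "1",
        "-id3v2_version", "3",
        "-y",
        "-loglevel", "error",
        "-stats",
        "/var/cache/faketunes/01 - Track Name.m4a",
    }

    fmt.Println("ffmpeg " + strings.Join(args, " "))
}
```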
10
internal/domains/transcoder/errors.go
Normal file
@@ -0,0 +1,10 @@
package transcoder

import "errors"

var (
    ErrTranscoder               = errors.New("transcoder")
    ErrTranscodeError           = errors.New("transcode error")
    ErrTranscodedFileIsTooSmall = errors.New("transcoded file is too small")
    ErrTranscodedFileNotFound   = errors.New("transcoded file not found")
)
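Because `Convert` wraps these sentinels with multiple `%w` verbs (supported since Go 1.20), callers can match any link of the chain with `errors.Is`. A small sketch mirroring that wrapping style:

```go
package main

import (
    "errors"
    "fmt"
)

var (
    ErrTranscoder     = errors.New("transcoder")
    ErrTranscodeError = errors.New("transcode error")
)

func main() {
    // Mirrors the wrapping used in Convert: every sentinel stays matchable.
    cause := errors.New("exit status 1")
    err := fmt.Errorf("%w: %w (%w)", ErrTranscoder, ErrTranscodeError, cause)

    fmt.Println(errors.Is(err, ErrTranscoder))     // true
    fmt.Println(errors.Is(err, ErrTranscodeError)) // true
    fmt.Println(errors.Is(err, cause))             // true
}
```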
41
internal/domains/transcoder/metadata.go
Normal file
@@ -0,0 +1,41 @@
package transcoder

import (
    "path/filepath"
    "strings"
)

func (t *Transcoder) escapeMetadata(item string) string {
    // Escape quotes and backslashes for FFmpeg metadata
    item = strings.ReplaceAll(item, `\`, `\\`)
    item = strings.ReplaceAll(item, `"`, `\"`)
    item = strings.ReplaceAll(item, `'`, `\'`)

    // Also escape semicolons and equals signs
    item = strings.ReplaceAll(item, `;`, `\;`)
    item = strings.ReplaceAll(item, `=`, `\=`)

    return item
}

func (t *Transcoder) extractAlbumArtist(filePath, sourceDir string) string {
    // Get relative path from source directory
    relPath, err := filepath.Rel(sourceDir, filePath)
    if err != nil {
        return "Unknown Artist"
    }

    // Split path into components
    parts := strings.Split(relPath, string(filepath.Separator))

    // Album artist is the first directory after source
    // e.g., /source/Artist/Album/01 - Track Name.flac
    if len(parts) >= 2 {
        artist := parts[0]
        artist = strings.TrimSpace(artist)

        return artist
    }

    return "Unknown Artist"
}
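A worked example of the path arithmetic in `extractAlbumArtist`, using the library layout described in the README: the first component of the path relative to the library root is the artist directory, so that is what gets written as `sort_artist`. The helper below is a standalone copy of the same logic (the real method hangs off `*Transcoder`), with illustrative paths.

```go
package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// albumArtistFromPath repeats the logic of extractAlbumArtist for illustration.
func albumArtistFromPath(filePath, libraryRoot string) string {
    relPath, err := filepath.Rel(libraryRoot, filePath)
    if err != nil {
        return "Unknown Artist"
    }

    parts := strings.Split(relPath, string(filepath.Separator))
    if len(parts) >= 2 {
        // The first path component under the library root is the artist directory.
        return strings.TrimSpace(parts[0])
    }

    return "Unknown Artist"
}

func main() {
    // /library/Artist/Album/01 - Track Name.flac relative to /library starts
    // with the artist directory, so parts[0] is the album artist.
    fmt.Println(albumArtistFromPath("/library/Artist/Album/01 - Track Name.flac", "/library")) // Artist

    // With only one component there is no artist directory to read.
    fmt.Println(albumArtistFromPath("/library/loose-track.flac", "/library")) // Unknown Artist
}
```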
5
internal/domains/transcoder/queue.go
Normal file
@@ -0,0 +1,5 @@
package transcoder

func (t *Transcoder) QueueChannel() chan struct{} {
    return t.transcodeQueue
}
31
internal/domains/transcoder/transcoder.go
Normal file
@@ -0,0 +1,31 @@
package transcoder

import (
    "source.hodakov.me/hdkv/faketunes/internal/application"
    "source.hodakov.me/hdkv/faketunes/internal/domains"
)

var (
    _ domains.Transcoder = new(Transcoder)
    _ domains.Domain     = new(Transcoder)
)

type Transcoder struct {
    app            *application.App
    transcodeQueue chan struct{}
}

func New(app *application.App) *Transcoder {
    return &Transcoder{
        app:            app,
        transcodeQueue: make(chan struct{}, app.Config().Transcoding.Parallel),
    }
}

func (t *Transcoder) ConnectDependencies() error {
    return nil
}

func (t *Transcoder) Start() error {
    return nil
}
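`transcodeQueue` is a buffered channel sized from `Transcoding.Parallel`, which suggests it is meant to act as a counting semaphore around `Convert`. The actual call site is not shown in this commit, so the sketch below only illustrates that pattern under that assumption; the function names and the stub converter are hypothetical.

```go
package main

import (
    "log"
    "sync"
)

// transcodeAll runs convert over the sources, using the buffered queue channel
// as a counting semaphore so at most cap(queue) conversions run at once.
func transcodeAll(queue chan struct{}, convert func(src, dst string) (int64, error), sources map[string]string) {
    var wg sync.WaitGroup

    for src, dst := range sources {
        wg.Add(1)

        go func(src, dst string) {
            defer wg.Done()

            queue <- struct{}{}        // acquire a slot
            defer func() { <-queue }() // release it when done

            if _, err := convert(src, dst); err != nil {
                log.Printf("transcode of %s failed: %v", src, err)
            }
        }(src, dst)
    }

    wg.Wait()
}

func main() {
    queue := make(chan struct{}, 2) // stands in for QueueChannel(), sized by Transcoding.Parallel

    fakeConvert := func(src, dst string) (int64, error) {
        log.Printf("converting %s -> %s", src, dst)
        return 1024, nil
    }

    transcodeAll(queue, fakeConvert, map[string]string{
        "/library/Artist/Album/01 - Track Name.flac": "/cache/01 - Track Name.m4a",
    })
}
```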
31
vendor/github.com/goccy/go-yaml/.codecov.yml
generated
vendored
Normal file
31
vendor/github.com/goccy/go-yaml/.codecov.yml
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
codecov:
|
||||||
|
require_ci_to_pass: yes
|
||||||
|
|
||||||
|
coverage:
|
||||||
|
precision: 2
|
||||||
|
round: down
|
||||||
|
range: "70...100"
|
||||||
|
|
||||||
|
status:
|
||||||
|
project:
|
||||||
|
default:
|
||||||
|
target: 75%
|
||||||
|
threshold: 2%
|
||||||
|
patch: off
|
||||||
|
changes: no
|
||||||
|
|
||||||
|
parsers:
|
||||||
|
gcov:
|
||||||
|
branch_detection:
|
||||||
|
conditional: yes
|
||||||
|
loop: yes
|
||||||
|
method: no
|
||||||
|
macro: no
|
||||||
|
|
||||||
|
comment:
|
||||||
|
layout: "header,diff"
|
||||||
|
behavior: default
|
||||||
|
require_changes: no
|
||||||
|
|
||||||
|
ignore:
|
||||||
|
- ast
|
||||||
3
vendor/github.com/goccy/go-yaml/.gitignore
generated
vendored
Normal file
3
vendor/github.com/goccy/go-yaml/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
bin/
|
||||||
|
.idea/
|
||||||
|
cover.out
|
||||||
65
vendor/github.com/goccy/go-yaml/.golangci.yml
generated
vendored
Normal file
65
vendor/github.com/goccy/go-yaml/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
version: "2"
|
||||||
|
linters:
|
||||||
|
default: none
|
||||||
|
enable:
|
||||||
|
- errcheck
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
|
- misspell
|
||||||
|
- perfsprint
|
||||||
|
- staticcheck
|
||||||
|
- unused
|
||||||
|
settings:
|
||||||
|
errcheck:
|
||||||
|
without_tests: true
|
||||||
|
govet:
|
||||||
|
disable:
|
||||||
|
- tests
|
||||||
|
misspell:
|
||||||
|
locale: US
|
||||||
|
perfsprint:
|
||||||
|
int-conversion: false
|
||||||
|
err-error: false
|
||||||
|
errorf: true
|
||||||
|
sprintf1: false
|
||||||
|
strconcat: false
|
||||||
|
staticcheck:
|
||||||
|
checks:
|
||||||
|
- -ST1000
|
||||||
|
- -ST1005
|
||||||
|
- all
|
||||||
|
exclusions:
|
||||||
|
generated: lax
|
||||||
|
presets:
|
||||||
|
- comments
|
||||||
|
- common-false-positives
|
||||||
|
- legacy
|
||||||
|
- std-error-handling
|
||||||
|
rules:
|
||||||
|
- linters:
|
||||||
|
- staticcheck
|
||||||
|
path: _test\.go
|
||||||
|
paths:
|
||||||
|
- third_party$
|
||||||
|
- builtin$
|
||||||
|
- examples$
|
||||||
|
formatters:
|
||||||
|
enable:
|
||||||
|
- gci
|
||||||
|
- gofmt
|
||||||
|
settings:
|
||||||
|
gci:
|
||||||
|
sections:
|
||||||
|
- standard
|
||||||
|
- default
|
||||||
|
- prefix(github.com/goccy/go-yaml)
|
||||||
|
- blank
|
||||||
|
- dot
|
||||||
|
gofmt:
|
||||||
|
simplify: true
|
||||||
|
exclusions:
|
||||||
|
generated: lax
|
||||||
|
paths:
|
||||||
|
- third_party$
|
||||||
|
- builtin$
|
||||||
|
- examples$
|
||||||
186
vendor/github.com/goccy/go-yaml/CHANGELOG.md
generated
vendored
Normal file
186
vendor/github.com/goccy/go-yaml/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
# 1.11.2 - 2023-09-15
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
- Fix quoted comments ( #370 )
|
||||||
|
- Fix handle of space at start or last ( #376 )
|
||||||
|
- Fix sequence with comment ( #390 )
|
||||||
|
|
||||||
|
# 1.11.1 - 2023-09-14
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
- Handle `\r` in a double-quoted string the same as `\n` ( #372 )
|
||||||
|
- Replace loop with n.Values = append(n.Values, target.Values...) ( #380 )
|
||||||
|
- Skip encoding an inline field if it is null ( #386 )
|
||||||
|
- Fix comment parsing with null value ( #388 )
|
||||||
|
|
||||||
|
# 1.11.0 - 2023-04-03
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- Supports dynamically switch encode and decode processing for a given type
|
||||||
|
|
||||||
|
# 1.10.1 - 2023-03-28
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- Quote YAML 1.1 bools at encoding time for compatibility with other legacy parsers
|
||||||
|
- Add support of 32-bit architecture
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
- Don't trim all space characters in block style sequence
|
||||||
|
- Support strings starting with `@`
|
||||||
|
|
||||||
|
# 1.10.0 - 2023-03-01
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
Reversible conversion of comments was not working in various cases, which has been corrected.
|
||||||
|
**Breaking Change** exists in the comment map interface. However, if you are dealing with CommentMap directly, there is no problem.
|
||||||
|
|
||||||
|
|
||||||
|
# 1.9.8 - 2022-12-19
|
||||||
|
|
||||||
|
### Fix feature
|
||||||
|
|
||||||
|
- Append new line at the end of file ( #329 )
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
- Fix custom marshaler ( #333, #334 )
|
||||||
|
- Fix behavior when struct fields conflicted( #335 )
|
||||||
|
- Fix position calculation for literal, folded and raw folded strings ( #330 )
|
||||||
|
|
||||||
|
# 1.9.7 - 2022-12-03
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
- Fix handling of quoted map key ( #328 )
|
||||||
|
- Fix resusing process of scanning context ( #322 )
|
||||||
|
|
||||||
|
## v1.9.6 - 2022-10-26
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
- Introduce MapKeyNode interface to limit node types for map key ( #312 )
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
- Quote strings with special characters in flow mode ( #270 )
|
||||||
|
- typeError implements PrettyPrinter interface ( #280 )
|
||||||
|
- Fix incorrect const type ( #284 )
|
||||||
|
- Fix large literals type inference on 32 bits ( #293 )
|
||||||
|
- Fix UTF-8 characters ( #294 )
|
||||||
|
- Fix decoding of unknown aliases ( #317 )
|
||||||
|
- Fix stream encoder for insert a separator between each encoded document ( #318 )
|
||||||
|
|
||||||
|
### Update
|
||||||
|
|
||||||
|
- Update golang.org/x/sys ( #289 )
|
||||||
|
- Update Go version in CI ( #295 )
|
||||||
|
- Add test cases for missing keys to struct literals ( #300 )
|
||||||
|
|
||||||
|
## v1.9.5 - 2022-01-12
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
* Add UseSingleQuote option ( #265 )
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
* Preserve defaults while decoding nested structs ( #260 )
|
||||||
|
* Fix minor typo in decodeInit error ( #264 )
|
||||||
|
* Handle empty sequence entries ( #275 )
|
||||||
|
* Fix encoding of sequence with multiline string ( #276 )
|
||||||
|
* Fix encoding of BytesMarshaler type ( #277 )
|
||||||
|
* Fix indentState logic for multi-line value ( #278 )
|
||||||
|
|
||||||
|
## v1.9.4 - 2021-10-12
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
* Keep prev/next reference between tokens containing comments when filtering comment tokens ( #257 )
|
||||||
|
* Supports escaping reserved keywords in PathBuilder ( #258 )
|
||||||
|
|
||||||
|
## v1.9.3 - 2021-09-07
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
* Support encoding and decoding `time.Duration` fields ( #246 )
|
||||||
|
* Allow reserved characters for key name in YAMLPath ( #251 )
|
||||||
|
* Support getting YAMLPath from ast.Node ( #252 )
|
||||||
|
* Support CommentToMap option ( #253 )
|
||||||
|
|
||||||
|
### Fix bugs
|
||||||
|
|
||||||
|
* Fix encoding nested sequences with `yaml.IndentSequence` ( #241 )
|
||||||
|
* Fix error reporting on inline structs in strict mode ( #244, #245 )
|
||||||
|
* Fix encoding of large floats ( #247 )
|
||||||
|
|
||||||
|
### Improve workflow
|
||||||
|
|
||||||
|
* Migrate CI from CircleCI to GitHub Action ( #249 )
|
||||||
|
* Add workflow for ycat ( #250 )
|
||||||
|
|
||||||
|
## v1.9.2 - 2021-07-26
|
||||||
|
|
||||||
|
### Support WithComment option ( #238 )
|
||||||
|
|
||||||
|
`yaml.WithComment` is a option for encoding with comment.
|
||||||
|
The position where you want to add a comment is represented by YAMLPath, and it is the key of `yaml.CommentMap`.
|
||||||
|
Also, you can select `Head` comment or `Line` comment as the comment type.
|
||||||
|
|
||||||
|
## v1.9.1 - 2021-07-20
|
||||||
|
|
||||||
|
### Fix DecodeFromNode ( #237 )
|
||||||
|
|
||||||
|
- Fix YAML handling where anchor exists
|
||||||
|
|
||||||
|
## v1.9.0 - 2021-07-19
|
||||||
|
|
||||||
|
### New features
|
||||||
|
|
||||||
|
- Support encoding of comment node ( #233 )
|
||||||
|
- Support `yaml.NodeToValue(ast.Node, interface{}, ...DecodeOption) error` ( #236 )
|
||||||
|
- Can convert a AST node to a value directly
|
||||||
|
|
||||||
|
### Fix decoder for comment
|
||||||
|
|
||||||
|
- Fix parsing of literal with comment ( #234 )
|
||||||
|
|
||||||
|
### Rename API ( #235 )
|
||||||
|
|
||||||
|
- Rename `MarshalWithContext` to `MarshalContext`
|
||||||
|
- Rename `UnmarshalWithContext` to `UnmarshalContext`
|
||||||
|
|
||||||
|
## v1.8.10 - 2021-07-02
|
||||||
|
|
||||||
|
### Fixed bugs
|
||||||
|
|
||||||
|
- Fix searching anchor by alias name ( #212 )
|
||||||
|
- Fixing Issue 186, scanner should account for newline characters when processing multi-line text. Without this source annotations line/column number (for this and all subsequent tokens) is inconsistent with plain text editors. e.g. https://github.com/goccy/go-yaml/issues/186. This addresses the issue specifically for single and double quote text only. ( #210 )
|
||||||
|
- Add error for unterminated flow mapping node ( #213 )
|
||||||
|
- Handle missing required field validation ( #221 )
|
||||||
|
- Nicely format unexpected node type errors ( #229 )
|
||||||
|
- Support to encode map which has defined type key ( #231 )
|
||||||
|
|
||||||
|
### New features
|
||||||
|
|
||||||
|
- Support sequence indentation by EncodeOption ( #232 )
|
||||||
|
|
||||||
|
## v1.8.9 - 2021-03-01
|
||||||
|
|
||||||
|
### Fixed bugs
|
||||||
|
|
||||||
|
- Fix origin buffer for DocumentHeader and DocumentEnd and Directive
|
||||||
|
- Fix origin buffer for anchor value
|
||||||
|
- Fix syntax error about map value
|
||||||
|
- Fix parsing MergeKey ('<<') characters
|
||||||
|
- Fix encoding of float value
|
||||||
|
- Fix incorrect column annotation when single or double quotes are used
|
||||||
|
|
||||||
|
### New features
|
||||||
|
|
||||||
|
- Support to encode/decode of ast.Node directly
|
||||||
21
vendor/github.com/goccy/go-yaml/LICENSE
generated
vendored
Normal file
21
vendor/github.com/goccy/go-yaml/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2019 Masaaki Goshima
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
55
vendor/github.com/goccy/go-yaml/Makefile
generated
vendored
Normal file
55
vendor/github.com/goccy/go-yaml/Makefile
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
## Location to install dependencies to
|
||||||
|
LOCALBIN ?= $(shell pwd)/bin
|
||||||
|
TESTMOD := testdata/go_test.mod
|
||||||
|
|
||||||
|
$(LOCALBIN):
|
||||||
|
mkdir -p $(LOCALBIN)
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
|
test:
|
||||||
|
go test -v -race ./...
|
||||||
|
go test -v -race ./testdata -modfile=$(TESTMOD)
|
||||||
|
|
||||||
|
.PHONY: simple-test
|
||||||
|
simple-test:
|
||||||
|
go test -v ./...
|
||||||
|
go test -v ./testdata -modfile=$(TESTMOD)
|
||||||
|
|
||||||
|
.PHONY: fuzz
|
||||||
|
fuzz:
|
||||||
|
go test -fuzz=Fuzz -fuzztime 60s
|
||||||
|
|
||||||
|
.PHONY: cover
|
||||||
|
cover:
|
||||||
|
go test -coverpkg=.,./ast,./lexer,./parser,./printer,./scanner,./token -coverprofile=cover.out -modfile=$(TESTMOD) ./... ./testdata
|
||||||
|
|
||||||
|
.PHONY: cover-html
|
||||||
|
cover-html: cover
|
||||||
|
go tool cover -html=cover.out
|
||||||
|
|
||||||
|
.PHONY: ycat/build
|
||||||
|
ycat/build: $(LOCALBIN)
|
||||||
|
cd ./cmd/ycat && go build -o $(LOCALBIN)/ycat .
|
||||||
|
|
||||||
|
.PHONY: lint
|
||||||
|
lint: golangci-lint ## Run golangci-lint
|
||||||
|
@$(GOLANGCI_LINT) run
|
||||||
|
|
||||||
|
.PHONY: fmt
|
||||||
|
fmt: golangci-lint ## Ensure consistent code style
|
||||||
|
@go mod tidy
|
||||||
|
@go fmt ./...
|
||||||
|
@$(GOLANGCI_LINT) run --fix
|
||||||
|
|
||||||
|
## Tool Binaries
|
||||||
|
GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint
|
||||||
|
|
||||||
|
## Tool Versions
|
||||||
|
GOLANGCI_VERSION := 2.1.2
|
||||||
|
|
||||||
|
.PHONY: golangci-lint
|
||||||
|
.PHONY: $(GOLANGCI_LINT)
|
||||||
|
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
|
||||||
|
$(GOLANGCI_LINT): $(LOCALBIN)
|
||||||
|
@test -s $(LOCALBIN)/golangci-lint && $(LOCALBIN)/golangci-lint version --short | grep -q $(GOLANGCI_VERSION) || \
|
||||||
|
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(LOCALBIN) v$(GOLANGCI_VERSION)
|
||||||
420
vendor/github.com/goccy/go-yaml/README.md
generated
vendored
Normal file
420
vendor/github.com/goccy/go-yaml/README.md
generated
vendored
Normal file
@@ -0,0 +1,420 @@
|
|||||||
|
# YAML support for the Go language
|
||||||
|
|
||||||
|
[](https://pkg.go.dev/github.com/goccy/go-yaml)
|
||||||
|

|
||||||
|
[](https://codecov.io/gh/goccy/go-yaml)
|
||||||
|
[](https://goreportcard.com/report/github.com/goccy/go-yaml)
|
||||||
|
|
||||||
|
<img width="300px" src="https://user-images.githubusercontent.com/209884/67159116-64d94b80-f37b-11e9-9b28-f8379636a43c.png"></img>
|
||||||
|
|
||||||
|
## This library has **NO** relation to the go-yaml/yaml library
|
||||||
|
|
||||||
|
> [!IMPORTANT]
|
||||||
|
> This library is developed from scratch to replace [`go-yaml/yaml`](https://github.com/go-yaml/yaml).
|
||||||
|
> If you're looking for a better YAML library, this one should be helpful.
|
||||||
|
|
||||||
|
# Why a new library?
|
||||||
|
|
||||||
|
As of this writing, there already exists a de facto standard library for YAML processing for Go: [https://github.com/go-yaml/yaml](https://github.com/go-yaml/yaml). However, we believe that a new YAML library is necessary for the following reasons:
|
||||||
|
|
||||||
|
- Not actively maintained
|
||||||
|
- `go-yaml/yaml` has ported the libyaml written in C to Go, so the source code is not written in Go style
|
||||||
|
- There is a lot of content that cannot be parsed
|
||||||
|
- YAML is often used for configuration, and it is common to include validation along with it. However, the errors in `go-yaml/yaml` are not intuitive, and it is difficult to provide meaningful validation errors
|
||||||
|
- When creating tools that use YAML, there are cases where reversible transformation of YAML is required. However, to perform reversible transformations of content that includes Comments or Anchors/Aliases, manipulating the AST is the only option
|
||||||
|
- Non-intuitive [Marshaler](https://pkg.go.dev/gopkg.in/yaml.v3#Marshaler) / [Unmarshaler](https://pkg.go.dev/gopkg.in/yaml.v3#Unmarshaler)
|
||||||
|
|
||||||
|
By the way, libraries such as [ghodss/yaml](https://github.com/ghodss/yaml) and [sigs.k8s.io/yaml](https://github.com/kubernetes-sigs/yaml) also depend on go-yaml/yaml, so if you are using these libraries, the same issues apply: they cannot parse things that go-yaml/yaml cannot parse, and they inherit many of the problems that go-yaml/yaml has.
|
||||||
|
|
||||||
|
# Features
|
||||||
|
|
||||||
|
- No dependencies
|
||||||
|
- A better parser than `go-yaml/yaml`.
|
||||||
|
- [Support recursive processing](https://github.com/apple/device-management/blob/release/docs/schema.yaml)
|
||||||
|
- Higher coverage in the [YAML Test Suite](https://github.com/yaml/yaml-test-suite?tab=readme-ov-file)
|
||||||
|
- YAML Test Suite consists of 402 cases in total, of which `gopkg.in/yaml.v3` passes `295`. In addition to passing all those test cases, `goccy/go-yaml` successfully passes nearly 60 additional test cases ( 2024/12/15 )
|
||||||
|
- The test code is [here](https://github.com/goccy/go-yaml/blob/master/yaml_test_suite_test.go#L77)
|
||||||
|
- Ease and sustainability of maintenance
|
||||||
|
- The main maintainer is [@goccy](https://github.com/goccy), but we are also building a system to develop as a team with trusted developers
|
||||||
|
- Since it is written from scratch, the code is easy to read for Gophers
|
||||||
|
- An API structure that allows the use of not only `Encoder`/`Decoder` but also `Tokenizer` and `Parser` functionalities.
|
||||||
|
- [lexer.Tokenize](https://pkg.go.dev/github.com/goccy/go-yaml@v1.15.4/lexer#Tokenize)
|
||||||
|
- [parser.Parse](https://pkg.go.dev/github.com/goccy/go-yaml@v1.15.4/parser#Parse)
|
||||||
|
- Filtering, replacing, and merging YAML content using YAML Path
|
||||||
|
- Reversible transformation without using the AST for YAML that includes Anchors, Aliases, and Comments
|
||||||
|
- Customize the Marshal/Unmarshal behavior for primitive types and third-party library types ([RegisterCustomMarshaler](https://pkg.go.dev/github.com/goccy/go-yaml#RegisterCustomMarshaler), [RegisterCustomUnmarshaler](https://pkg.go.dev/github.com/goccy/go-yaml#RegisterCustomUnmarshaler))
|
||||||
|
- Respects `encoding/json` behavior
|
||||||
|
- Accept the `json` tag. Note that not all options from the `json` tag will have significance when parsing YAML documents. If both tags exist, `yaml` tag will take precedence.
|
||||||
|
- [json.Marshaler](https://pkg.go.dev/encoding/json#Marshaler) style [marshaler](https://pkg.go.dev/github.com/goccy/go-yaml#BytesMarshaler)
|
||||||
|
- [json.Unmarshaler](https://pkg.go.dev/encoding/json#Unmarshaler) style [unmarshaler](https://pkg.go.dev/github.com/goccy/go-yaml#BytesUnmarshaler)
|
||||||
|
- Options for using `MarshalJSON` and `UnmarshalJSON` ([UseJSONMarshaler](https://pkg.go.dev/github.com/goccy/go-yaml#UseJSONMarshaler), [UseJSONUnmarshaler](https://pkg.go.dev/github.com/goccy/go-yaml#UseJSONUnmarshaler))
|
||||||
|
- Pretty format for error notifications
|
||||||
|
- Smart validation processing combined with [go-playground/validator](https://github.com/go-playground/validator)
|
||||||
|
- [example test code is here](https://github.com/goccy/go-yaml/blob/45889c98b0a0967240eb595a1bd6896e2f575106/testdata/validate_test.go#L12)
|
||||||
|
- Allow referencing elements declared in another file via anchors
|
||||||
|
|
||||||
|
# Users
|
||||||
|
|
||||||
|
The repositories that use goccy/go-yaml are listed here.
|
||||||
|
|
||||||
|
- https://github.com/goccy/go-yaml/wiki/Users
|
||||||
|
|
||||||
|
The source data is [here](https://github.com/goccy/go-yaml/network/dependents).
|
||||||
|
It is already being used in many repositories. Now it's your turn 😄
|
||||||
|
|
||||||
|
# Playground
|
||||||
|
|
||||||
|
The Playground visualizes how go-yaml processes YAML text. Use it to assist with your debugging or issue reporting.
|
||||||
|
|
||||||
|
https://goccy.github.io/go-yaml
|
||||||
|
|
||||||
|
# Installation
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go get github.com/goccy/go-yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
# Synopsis
|
||||||
|
|
||||||
|
## 1. Simple Encode/Decode
|
||||||
|
|
||||||
|
Has an interface like `go-yaml/yaml` using `reflect`
|
||||||
|
|
||||||
|
```go
|
||||||
|
var v struct {
|
||||||
|
A int
|
||||||
|
B string
|
||||||
|
}
|
||||||
|
v.A = 1
|
||||||
|
v.B = "hello"
|
||||||
|
bytes, err := yaml.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
fmt.Println(string(bytes)) // "a: 1\nb: hello\n"
|
||||||
|
```
|
||||||
|
|
||||||
|
```go
|
||||||
|
yml := `
|
||||||
|
%YAML 1.2
|
||||||
|
---
|
||||||
|
a: 1
|
||||||
|
b: c
|
||||||
|
`
|
||||||
|
var v struct {
|
||||||
|
A int
|
||||||
|
B string
|
||||||
|
}
|
||||||
|
if err := yaml.Unmarshal([]byte(yml), &v); err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
To control marshal/unmarshal behavior, you can use the `yaml` tag.
|
||||||
|
|
||||||
|
```go
|
||||||
|
yml := `---
|
||||||
|
foo: 1
|
||||||
|
bar: c
|
||||||
|
`
|
||||||
|
var v struct {
|
||||||
|
A int `yaml:"foo"`
|
||||||
|
B string `yaml:"bar"`
|
||||||
|
}
|
||||||
|
if err := yaml.Unmarshal([]byte(yml), &v); err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For convenience, we also accept the `json` tag. Note that not all options from
|
||||||
|
the `json` tag will have significance when parsing YAML documents. If both
|
||||||
|
tags exist, `yaml` tag will take precedence.
|
||||||
|
|
||||||
|
```go
|
||||||
|
yml := `---
|
||||||
|
foo: 1
|
||||||
|
bar: c
|
||||||
|
`
|
||||||
|
var v struct {
|
||||||
|
A int `json:"foo"`
|
||||||
|
B string `json:"bar"`
|
||||||
|
}
|
||||||
|
if err := yaml.Unmarshal([]byte(yml), &v); err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For custom marshal/unmarshaling, implement either `Bytes` or `Interface` variant of marshaler/unmarshaler. The difference is that while `BytesMarshaler`/`BytesUnmarshaler` behaves like [`encoding/json`](https://pkg.go.dev/encoding/json) and `InterfaceMarshaler`/`InterfaceUnmarshaler` behaves like [`gopkg.in/yaml.v2`](https://pkg.go.dev/gopkg.in/yaml.v2).
|
||||||
|
|
||||||
|
Semantically both are the same, but they differ in performance. Because indentation matters in YAML, you cannot simply accept a valid YAML fragment from a Marshaler, and expect it to work when it is attached to the parent container's serialized form. Therefore when we receive use the `BytesMarshaler`, which returns `[]byte`, we must decode it once to figure out how to make it work in the given context. If you use the `InterfaceMarshaler`, we can skip the decoding.
|
||||||
|
|
||||||
|
If you are repeatedly marshaling complex objects, the latter is always better
|
||||||
|
performance wise. But if you are, for example, just providing a choice between
|
||||||
|
a config file format that is read only once, the former is probably easier to
|
||||||
|
code.
|
||||||
|
|
||||||
|
## 2. Reference elements declared in another file
|
||||||
|
|
||||||
|
`testdata` directory contains `anchor.yml` file:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
├── testdata
|
||||||
|
└── anchor.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
And `anchor.yml` is defined as follows:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
a: &a
|
||||||
|
b: 1
|
||||||
|
c: hello
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, if `yaml.ReferenceDirs("testdata")` option is passed to `yaml.Decoder`,
|
||||||
|
`Decoder` tries to find the anchor definition from YAML files the under `testdata` directory.
|
||||||
|
|
||||||
|
```go
|
||||||
|
buf := bytes.NewBufferString("a: *a\n")
|
||||||
|
dec := yaml.NewDecoder(buf, yaml.ReferenceDirs("testdata"))
|
||||||
|
var v struct {
|
||||||
|
A struct {
|
||||||
|
B int
|
||||||
|
C string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := dec.Decode(&v); err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
fmt.Printf("%+v\n", v) // {A:{B:1 C:hello}}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 3. Encode with `Anchor` and `Alias`
|
||||||
|
|
||||||
|
### 3.1. Explicitly declared `Anchor` name and `Alias` name
|
||||||
|
|
||||||
|
If you want to use `anchor`, you can define it as a struct tag.
|
||||||
|
If the value specified for an anchor is a pointer type and the same address as the pointer is found, the value is automatically set to alias.
|
||||||
|
If an explicit alias name is specified, an error is raised if its value is different from the value specified in the anchor.
|
||||||
|
|
||||||
|
```go
|
||||||
|
type T struct {
|
||||||
|
A int
|
||||||
|
B string
|
||||||
|
}
|
||||||
|
var v struct {
|
||||||
|
C *T `yaml:"c,anchor=x"`
|
||||||
|
D *T `yaml:"d,alias=x"`
|
||||||
|
}
|
||||||
|
v.C = &T{A: 1, B: "hello"}
|
||||||
|
v.D = v.C
|
||||||
|
bytes, err := yaml.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fmt.Println(string(bytes))
|
||||||
|
/*
|
||||||
|
c: &x
|
||||||
|
a: 1
|
||||||
|
b: hello
|
||||||
|
d: *x
|
||||||
|
*/
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.2. Implicitly declared `Anchor` and `Alias` names
|
||||||
|
|
||||||
|
If you do not explicitly declare the anchor name, the default behavior is to
|
||||||
|
use the equivalent of `strings.ToLower($FieldName)` as the name of the anchor.
|
||||||
|
If the value specified for an anchor is a pointer type and the same address as the pointer is found, the value is automatically set to alias.
|
||||||
|
|
||||||
|
```go
|
||||||
|
type T struct {
|
||||||
|
I int
|
||||||
|
S string
|
||||||
|
}
|
||||||
|
var v struct {
|
||||||
|
A *T `yaml:"a,anchor"`
|
||||||
|
B *T `yaml:"b,anchor"`
|
||||||
|
C *T `yaml:"c"`
|
||||||
|
D *T `yaml:"d"`
|
||||||
|
}
|
||||||
|
v.A = &T{I: 1, S: "hello"}
|
||||||
|
v.B = &T{I: 2, S: "world"}
|
||||||
|
v.C = v.A // C has same pointer address to A
|
||||||
|
v.D = v.B // D has same pointer address to B
|
||||||
|
bytes, err := yaml.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
fmt.Println(string(bytes))
|
||||||
|
/*
|
||||||
|
a: &a
|
||||||
|
i: 1
|
||||||
|
s: hello
|
||||||
|
b: &b
|
||||||
|
i: 2
|
||||||
|
s: world
|
||||||
|
c: *a
|
||||||
|
d: *b
|
||||||
|
*/
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3.3 MergeKey and Alias
|
||||||
|
|
||||||
|
Merge key and alias ( `<<: *alias` ) can be used by embedding a structure with the `inline,alias` tag.
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Person struct {
|
||||||
|
*Person `yaml:",omitempty,inline,alias"` // embed Person type for default value
|
||||||
|
Name string `yaml:",omitempty"`
|
||||||
|
Age int `yaml:",omitempty"`
|
||||||
|
}
|
||||||
|
defaultPerson := &Person{
|
||||||
|
Name: "John Smith",
|
||||||
|
Age: 20,
|
||||||
|
}
|
||||||
|
people := []*Person{
|
||||||
|
{
|
||||||
|
Person: defaultPerson, // assign default value
|
||||||
|
Name: "Ken", // override Name property
|
||||||
|
Age: 10, // override Age property
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Person: defaultPerson, // assign default value only
|
||||||
|
},
|
||||||
|
}
|
||||||
|
var doc struct {
|
||||||
|
Default *Person `yaml:"default,anchor"`
|
||||||
|
People []*Person `yaml:"people"`
|
||||||
|
}
|
||||||
|
doc.Default = defaultPerson
|
||||||
|
doc.People = people
|
||||||
|
bytes, err := yaml.Marshal(doc)
|
||||||
|
if err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
fmt.Println(string(bytes))
|
||||||
|
/*
|
||||||
|
default: &default
|
||||||
|
name: John Smith
|
||||||
|
age: 20
|
||||||
|
people:
|
||||||
|
- <<: *default
|
||||||
|
name: Ken
|
||||||
|
age: 10
|
||||||
|
- <<: *default
|
||||||
|
*/
|
||||||
|
```
|
||||||
|
|
||||||
|
## 4. Pretty Formatted Errors
|
||||||
|
|
||||||
|
Error values produced during parsing have two extra features over regular
|
||||||
|
error values.
|
||||||
|
|
||||||
|
First, by default, they contain extra information on the location of the error
|
||||||
|
from the source YAML document, to make it easier to find the error location.
|
||||||
|
|
||||||
|
Second, the error messages can optionally be colorized.
|
||||||
|
|
||||||
|
If you would like to control exactly how the output looks like, consider
|
||||||
|
using `yaml.FormatError`, which accepts two boolean values to
|
||||||
|
control turning these features on or off.
|
||||||
|
|
||||||
|
<img src="https://user-images.githubusercontent.com/209884/67358124-587f0980-f59a-11e9-96fc-7205aab77695.png"></img>
|
||||||
|
|
||||||
|
## 5. Use YAMLPath
|
||||||
|
|
||||||
|
```go
|
||||||
|
yml := `
|
||||||
|
store:
|
||||||
|
book:
|
||||||
|
- author: john
|
||||||
|
price: 10
|
||||||
|
- author: ken
|
||||||
|
price: 12
|
||||||
|
bicycle:
|
||||||
|
color: red
|
||||||
|
price: 19.95
|
||||||
|
`
|
||||||
|
path, err := yaml.PathString("$.store.book[*].author")
|
||||||
|
if err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
var authors []string
|
||||||
|
if err := path.Read(strings.NewReader(yml), &authors); err != nil {
|
||||||
|
//...
|
||||||
|
}
|
||||||
|
fmt.Println(authors)
|
||||||
|
// [john ken]
|
||||||
|
```
|
||||||
|
|
||||||
|
### 5.1 Print customized error with YAML source code
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/goccy/go-yaml"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
yml := `
|
||||||
|
a: 1
|
||||||
|
b: "hello"
|
||||||
|
`
|
||||||
|
var v struct {
|
||||||
|
A int
|
||||||
|
B string
|
||||||
|
}
|
||||||
|
if err := yaml.Unmarshal([]byte(yml), &v); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if v.A != 2 {
|
||||||
|
// output error with YAML source
|
||||||
|
path, err := yaml.PathString("$.a")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
source, err := path.AnnotateSource([]byte(yml), true)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
fmt.Printf("a value expected 2 but actual %d:\n%s\n", v.A, string(source))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
output result is the following:
|
||||||
|
|
||||||
|
<img src="https://user-images.githubusercontent.com/209884/84148813-7aca8680-aa9a-11ea-8fc9-37dece2ebdac.png"></img>
|
||||||
|
|
||||||
|
|
||||||
|
# Tools
|
||||||
|
|
||||||
|
## ycat
|
||||||
|
|
||||||
|
print yaml file with color
|
||||||
|
|
||||||
|
<img width="713" alt="ycat" src="https://user-images.githubusercontent.com/209884/66986084-19b00600-f0f9-11e9-9f0e-1f91eb072fe0.png">
|
||||||
|
|
||||||
|
### Installation
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/goccy/go-yaml.git
|
||||||
|
cd go-yaml/cmd/ycat && go install .
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
# For Developers
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> In this project, we manage such test code under the `testdata` directory to avoid adding dependencies on libraries that are only needed for testing to the top `go.mod` file. Therefore, if you want to add test cases that use 3rd party libraries, please add the test code to the `testdata` directory.
|
||||||
|
|
||||||
|
# Looking for Sponsors
|
||||||
|
|
||||||
|
I'm looking for sponsors this library. This library is being developed as a personal project in my spare time. If you want a quick response or problem resolution when using this library in your project, please register as a [sponsor](https://github.com/sponsors/goccy). I will cooperate as much as possible. Of course, this library is developed as an MIT license, so you can use it freely for free.
|
||||||
|
|
||||||
|
# License
|
||||||
|
|
||||||
|
MIT
|
||||||
2381
vendor/github.com/goccy/go-yaml/ast/ast.go
generated
vendored
Normal file
2381
vendor/github.com/goccy/go-yaml/ast/ast.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
37
vendor/github.com/goccy/go-yaml/context.go
generated
vendored
Normal file
37
vendor/github.com/goccy/go-yaml/context.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
package yaml
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
type (
|
||||||
|
ctxMergeKey struct{}
|
||||||
|
ctxAnchorKey struct{}
|
||||||
|
)
|
||||||
|
|
||||||
|
func withMerge(ctx context.Context) context.Context {
|
||||||
|
return context.WithValue(ctx, ctxMergeKey{}, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isMerge(ctx context.Context) bool {
|
||||||
|
v, ok := ctx.Value(ctxMergeKey{}).(bool)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func withAnchor(ctx context.Context, name string) context.Context {
|
||||||
|
anchorMap := getAnchorMap(ctx)
|
||||||
|
if anchorMap == nil {
|
||||||
|
anchorMap = make(map[string]struct{})
|
||||||
|
}
|
||||||
|
anchorMap[name] = struct{}{}
|
||||||
|
return context.WithValue(ctx, ctxAnchorKey{}, anchorMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getAnchorMap(ctx context.Context) map[string]struct{} {
|
||||||
|
v, ok := ctx.Value(ctxAnchorKey{}).(map[string]struct{})
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
2037
vendor/github.com/goccy/go-yaml/decode.go
generated
vendored
Normal file
2037
vendor/github.com/goccy/go-yaml/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1074
vendor/github.com/goccy/go-yaml/encode.go
generated
vendored
Normal file
1074
vendor/github.com/goccy/go-yaml/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
77 vendor/github.com/goccy/go-yaml/error.go (generated, vendored, new file)
@@ -0,0 +1,77 @@
package yaml

import (
    "fmt"

    "github.com/goccy/go-yaml/ast"
    "github.com/goccy/go-yaml/internal/errors"
)

var (
    ErrInvalidQuery               = errors.New("invalid query")
    ErrInvalidPath                = errors.New("invalid path instance")
    ErrInvalidPathString          = errors.New("invalid path string")
    ErrNotFoundNode               = errors.New("node not found")
    ErrUnknownCommentPositionType = errors.New("unknown comment position type")
    ErrInvalidCommentMapValue     = errors.New("invalid comment map value. it must be not nil value")
    ErrDecodeRequiredPointerType  = errors.New("required pointer type value")
    ErrExceededMaxDepth           = errors.New("exceeded max depth")
    FormatErrorWithToken          = errors.FormatError
)

type (
    SyntaxError             = errors.SyntaxError
    TypeError               = errors.TypeError
    OverflowError           = errors.OverflowError
    DuplicateKeyError       = errors.DuplicateKeyError
    UnknownFieldError       = errors.UnknownFieldError
    UnexpectedNodeTypeError = errors.UnexpectedNodeTypeError
    Error                   = errors.Error
)

func ErrUnsupportedHeadPositionType(node ast.Node) error {
    return fmt.Errorf("unsupported comment head position for %s", node.Type())
}

func ErrUnsupportedLinePositionType(node ast.Node) error {
    return fmt.Errorf("unsupported comment line position for %s", node.Type())
}

func ErrUnsupportedFootPositionType(node ast.Node) error {
    return fmt.Errorf("unsupported comment foot position for %s", node.Type())
}

// IsInvalidQueryError whether err is ErrInvalidQuery or not.
func IsInvalidQueryError(err error) bool {
    return errors.Is(err, ErrInvalidQuery)
}

// IsInvalidPathError whether err is ErrInvalidPath or not.
func IsInvalidPathError(err error) bool {
    return errors.Is(err, ErrInvalidPath)
}

// IsInvalidPathStringError whether err is ErrInvalidPathString or not.
func IsInvalidPathStringError(err error) bool {
    return errors.Is(err, ErrInvalidPathString)
}

// IsNotFoundNodeError whether err is ErrNotFoundNode or not.
func IsNotFoundNodeError(err error) bool {
    return errors.Is(err, ErrNotFoundNode)
}

// IsInvalidTokenTypeError whether err is ast.ErrInvalidTokenType or not.
func IsInvalidTokenTypeError(err error) bool {
    return errors.Is(err, ast.ErrInvalidTokenType)
}

// IsInvalidAnchorNameError whether err is ast.ErrInvalidAnchorName or not.
func IsInvalidAnchorNameError(err error) bool {
    return errors.Is(err, ast.ErrInvalidAnchorName)
}

// IsInvalidAliasNameError whether err is ast.ErrInvalidAliasName or not.
func IsInvalidAliasNameError(err error) bool {
    return errors.Is(err, ast.ErrInvalidAliasName)
}
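This file is mostly a shim that re-exports the error types and helpers from the internal `errors` package. As a small illustration (not part of the vendored diff) of the re-exported `FormatErrorWithToken` helper, here a token is built by hand with `token.New`, the same constructor the parser uses elsewhere in this diff:

```go
package main

import (
    "fmt"

    "github.com/goccy/go-yaml"
    "github.com/goccy/go-yaml/token"
)

func main() {
    // Fabricate a token at line 3, column 7, as the scanner would for real input.
    pos := &token.Position{Line: 3, Column: 7}
    tk := token.New("bitrate", "bitrate", pos)

    // FormatErrorWithToken prefixes the message with "[line:column] "; with the
    // last argument set to true it would also print the surrounding source.
    msg := yaml.FormatErrorWithToken("unexpected key", tk, false, false)
    fmt.Println(msg) // e.g. "[3:7] unexpected key"
}
```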
246 vendor/github.com/goccy/go-yaml/internal/errors/error.go (generated, vendored, new file)
@@ -0,0 +1,246 @@
package errors

import (
    "errors"
    "fmt"
    "reflect"

    "github.com/goccy/go-yaml/ast"
    "github.com/goccy/go-yaml/printer"
    "github.com/goccy/go-yaml/token"
)

var (
    As  = errors.As
    Is  = errors.Is
    New = errors.New
)

const (
    defaultFormatColor   = false
    defaultIncludeSource = true
)

type Error interface {
    error
    GetToken() *token.Token
    GetMessage() string
    FormatError(bool, bool) string
}

var (
    _ Error = new(SyntaxError)
    _ Error = new(TypeError)
    _ Error = new(OverflowError)
    _ Error = new(DuplicateKeyError)
    _ Error = new(UnknownFieldError)
    _ Error = new(UnexpectedNodeTypeError)
)

type SyntaxError struct {
    Message string
    Token   *token.Token
}

type TypeError struct {
    DstType         reflect.Type
    SrcType         reflect.Type
    StructFieldName *string
    Token           *token.Token
}

type OverflowError struct {
    DstType reflect.Type
    SrcNum  string
    Token   *token.Token
}

type DuplicateKeyError struct {
    Message string
    Token   *token.Token
}

type UnknownFieldError struct {
    Message string
    Token   *token.Token
}

type UnexpectedNodeTypeError struct {
    Actual   ast.NodeType
    Expected ast.NodeType
    Token    *token.Token
}

// ErrSyntax creates a syntax error instance with message and token.
func ErrSyntax(msg string, tk *token.Token) *SyntaxError {
    return &SyntaxError{
        Message: msg,
        Token:   tk,
    }
}

// ErrOverflow creates an overflow error instance with message and a token.
func ErrOverflow(dstType reflect.Type, num string, tk *token.Token) *OverflowError {
    return &OverflowError{
        DstType: dstType,
        SrcNum:  num,
        Token:   tk,
    }
}

// ErrTypeMismatch creates a type mismatch error instance with token.
func ErrTypeMismatch(dstType, srcType reflect.Type, token *token.Token) *TypeError {
    return &TypeError{
        DstType: dstType,
        SrcType: srcType,
        Token:   token,
    }
}

// ErrDuplicateKey creates a duplicate key error instance with token.
func ErrDuplicateKey(msg string, tk *token.Token) *DuplicateKeyError {
    return &DuplicateKeyError{
        Message: msg,
        Token:   tk,
    }
}

// ErrUnknownField creates an unknown field error instance with token.
func ErrUnknownField(msg string, tk *token.Token) *UnknownFieldError {
    return &UnknownFieldError{
        Message: msg,
        Token:   tk,
    }
}

func ErrUnexpectedNodeType(actual, expected ast.NodeType, tk *token.Token) *UnexpectedNodeTypeError {
    return &UnexpectedNodeTypeError{
        Actual:   actual,
        Expected: expected,
        Token:    tk,
    }
}

func (e *SyntaxError) GetMessage() string {
    return e.Message
}

func (e *SyntaxError) GetToken() *token.Token {
    return e.Token
}

func (e *SyntaxError) Error() string {
    return e.FormatError(defaultFormatColor, defaultIncludeSource)
}

func (e *SyntaxError) FormatError(colored, inclSource bool) string {
    return FormatError(e.Message, e.Token, colored, inclSource)
}

func (e *OverflowError) GetMessage() string {
    return e.msg()
}

func (e *OverflowError) GetToken() *token.Token {
    return e.Token
}

func (e *OverflowError) Error() string {
    return e.FormatError(defaultFormatColor, defaultIncludeSource)
}

func (e *OverflowError) FormatError(colored, inclSource bool) string {
    return FormatError(e.msg(), e.Token, colored, inclSource)
}

func (e *OverflowError) msg() string {
    return fmt.Sprintf("cannot unmarshal %s into Go value of type %s ( overflow )", e.SrcNum, e.DstType)
}

func (e *TypeError) msg() string {
    if e.StructFieldName != nil {
        return fmt.Sprintf("cannot unmarshal %s into Go struct field %s of type %s", e.SrcType, *e.StructFieldName, e.DstType)
    }
    return fmt.Sprintf("cannot unmarshal %s into Go value of type %s", e.SrcType, e.DstType)
}

func (e *TypeError) GetMessage() string {
    return e.msg()
}

func (e *TypeError) GetToken() *token.Token {
    return e.Token
}

func (e *TypeError) Error() string {
    return e.FormatError(defaultFormatColor, defaultIncludeSource)
}

func (e *TypeError) FormatError(colored, inclSource bool) string {
    return FormatError(e.msg(), e.Token, colored, inclSource)
}

func (e *DuplicateKeyError) GetMessage() string {
    return e.Message
}

func (e *DuplicateKeyError) GetToken() *token.Token {
    return e.Token
}

func (e *DuplicateKeyError) Error() string {
    return e.FormatError(defaultFormatColor, defaultIncludeSource)
}

func (e *DuplicateKeyError) FormatError(colored, inclSource bool) string {
    return FormatError(e.Message, e.Token, colored, inclSource)
}

func (e *UnknownFieldError) GetMessage() string {
    return e.Message
}

func (e *UnknownFieldError) GetToken() *token.Token {
    return e.Token
}

func (e *UnknownFieldError) Error() string {
    return e.FormatError(defaultFormatColor, defaultIncludeSource)
}

func (e *UnknownFieldError) FormatError(colored, inclSource bool) string {
    return FormatError(e.Message, e.Token, colored, inclSource)
}

func (e *UnexpectedNodeTypeError) GetMessage() string {
    return e.msg()
}

func (e *UnexpectedNodeTypeError) GetToken() *token.Token {
    return e.Token
}

func (e *UnexpectedNodeTypeError) Error() string {
    return e.FormatError(defaultFormatColor, defaultIncludeSource)
}

func (e *UnexpectedNodeTypeError) FormatError(colored, inclSource bool) string {
    return FormatError(e.msg(), e.Token, colored, inclSource)
}

func (e *UnexpectedNodeTypeError) msg() string {
    return fmt.Sprintf("%s was used where %s is expected", e.Actual.YAMLName(), e.Expected.YAMLName())
}

func FormatError(errMsg string, token *token.Token, colored, inclSource bool) string {
    var pp printer.Printer
    if token == nil {
        return pp.PrintErrorMessage(errMsg, colored)
    }
    pos := fmt.Sprintf("[%d:%d] ", token.Position.Line, token.Position.Column)
    msg := pp.PrintErrorMessage(fmt.Sprintf("%s%s", pos, errMsg), colored)
    if inclSource {
        msg += "\n" + pp.PrintErrorToken(token, colored)
    }
    return msg
}
541 vendor/github.com/goccy/go-yaml/internal/format/format.go (generated, vendored, new file)
@@ -0,0 +1,541 @@
package format

import (
    "strings"

    "github.com/goccy/go-yaml/ast"
    "github.com/goccy/go-yaml/token"
)

func FormatNodeWithResolvedAlias(n ast.Node, anchorNodeMap map[string]ast.Node) string {
    tk := getFirstToken(n)
    if tk == nil {
        return ""
    }
    formatter := newFormatter(tk, hasComment(n))
    formatter.anchorNodeMap = anchorNodeMap
    return formatter.format(n)
}

func FormatNode(n ast.Node) string {
    tk := getFirstToken(n)
    if tk == nil {
        return ""
    }
    return newFormatter(tk, hasComment(n)).format(n)
}

func FormatFile(file *ast.File) string {
    if len(file.Docs) == 0 {
        return ""
    }
    tk := getFirstToken(file.Docs[0])
    if tk == nil {
        return ""
    }
    return newFormatter(tk, hasCommentFile(file)).formatFile(file)
}

func hasCommentFile(f *ast.File) bool {
    for _, doc := range f.Docs {
        if hasComment(doc.Body) {
            return true
        }
    }
    return false
}

func hasComment(n ast.Node) bool {
    if n == nil {
        return false
    }
    switch nn := n.(type) {
    case *ast.DocumentNode:
        return hasComment(nn.Body)
    case *ast.NullNode:
        return nn.Comment != nil
    case *ast.BoolNode:
        return nn.Comment != nil
    case *ast.IntegerNode:
        return nn.Comment != nil
    case *ast.FloatNode:
        return nn.Comment != nil
    case *ast.StringNode:
        return nn.Comment != nil
    case *ast.InfinityNode:
        return nn.Comment != nil
    case *ast.NanNode:
        return nn.Comment != nil
    case *ast.LiteralNode:
        return nn.Comment != nil
    case *ast.DirectiveNode:
        if nn.Comment != nil {
            return true
        }
        for _, value := range nn.Values {
            if hasComment(value) {
                return true
            }
        }
    case *ast.TagNode:
        if nn.Comment != nil {
            return true
        }
        return hasComment(nn.Value)
    case *ast.MappingNode:
        if nn.Comment != nil || nn.FootComment != nil {
            return true
        }
        for _, value := range nn.Values {
            if value.Comment != nil || value.FootComment != nil {
                return true
            }
            if hasComment(value.Key) {
                return true
            }
            if hasComment(value.Value) {
                return true
            }
        }
    case *ast.MappingKeyNode:
        return nn.Comment != nil
    case *ast.MergeKeyNode:
        return nn.Comment != nil
    case *ast.SequenceNode:
        if nn.Comment != nil || nn.FootComment != nil {
            return true
        }
        for _, entry := range nn.Entries {
            if entry.Comment != nil || entry.HeadComment != nil || entry.LineComment != nil {
                return true
            }
            if hasComment(entry.Value) {
                return true
            }
        }
    case *ast.AnchorNode:
        if nn.Comment != nil {
            return true
        }
        if hasComment(nn.Name) || hasComment(nn.Value) {
            return true
        }
    case *ast.AliasNode:
        if nn.Comment != nil {
            return true
        }
        if hasComment(nn.Value) {
            return true
        }
    }
    return false
}

func getFirstToken(n ast.Node) *token.Token {
    if n == nil {
        return nil
    }
    switch nn := n.(type) {
    case *ast.DocumentNode:
        if nn.Start != nil {
            return nn.Start
        }
        return getFirstToken(nn.Body)
    case *ast.NullNode:
        return nn.Token
    case *ast.BoolNode:
        return nn.Token
    case *ast.IntegerNode:
        return nn.Token
    case *ast.FloatNode:
        return nn.Token
    case *ast.StringNode:
        return nn.Token
    case *ast.InfinityNode:
        return nn.Token
    case *ast.NanNode:
        return nn.Token
    case *ast.LiteralNode:
        return nn.Start
    case *ast.DirectiveNode:
        return nn.Start
    case *ast.TagNode:
        return nn.Start
    case *ast.MappingNode:
        if nn.IsFlowStyle {
            return nn.Start
        }
        if len(nn.Values) == 0 {
            return nn.Start
        }
        return getFirstToken(nn.Values[0].Key)
    case *ast.MappingKeyNode:
        return nn.Start
    case *ast.MergeKeyNode:
        return nn.Token
    case *ast.SequenceNode:
        return nn.Start
    case *ast.AnchorNode:
        return nn.Start
    case *ast.AliasNode:
        return nn.Start
    }
    return nil
}

type Formatter struct {
    existsComment    bool
    tokenToOriginMap map[*token.Token]string
    anchorNodeMap    map[string]ast.Node
}

func newFormatter(tk *token.Token, existsComment bool) *Formatter {
    tokenToOriginMap := make(map[*token.Token]string)
    for tk.Prev != nil {
        tk = tk.Prev
    }
    tokenToOriginMap[tk] = tk.Origin

    var origin string
    for tk.Next != nil {
        tk = tk.Next
        if tk.Type == token.CommentType {
            origin += strings.Repeat("\n", strings.Count(normalizeNewLineChars(tk.Origin), "\n"))
            continue
        }
        origin += tk.Origin
        tokenToOriginMap[tk] = origin
        origin = ""
    }
    return &Formatter{
        existsComment:    existsComment,
        tokenToOriginMap: tokenToOriginMap,
    }
}

func getIndentNumByFirstLineToken(tk *token.Token) int {
    defaultIndent := tk.Position.Column - 1

    // key: value
    //    ^
    //   next
    if tk.Type == token.SequenceEntryType {
        // If the current token is the sequence entry.
        // the indent is calculated from the column value of the current token.
        return defaultIndent
    }

    // key: value
    //    ^
    //   next
    if tk.Next != nil && tk.Next.Type == token.MappingValueType {
        // If the current token is the key in the mapping-value,
        // the indent is calculated from the column value of the current token.
        return defaultIndent
    }

    if tk.Prev == nil {
        return defaultIndent
    }
    prev := tk.Prev

    // key: value
    //    ^
    //   prev
    if prev.Type == token.MappingValueType {
        // If the current token is the value in the mapping-value,
        // the indent is calculated from the column value of the key two steps back.
        if prev.Prev == nil {
            return defaultIndent
        }
        return prev.Prev.Position.Column - 1
    }

    // - value
    // ^
    // prev
    if prev.Type == token.SequenceEntryType {
        // If the value is not a mapping-value and the previous token was a sequence entry,
        // the indent is calculated using the column value of the sequence entry token.
        return prev.Position.Column - 1
    }

    return defaultIndent
}

func (f *Formatter) format(n ast.Node) string {
    return f.trimSpacePrefix(
        f.trimIndentSpace(
            getIndentNumByFirstLineToken(getFirstToken(n)),
            f.trimNewLineCharPrefix(f.formatNode(n)),
        ),
    )
}

func (f *Formatter) formatFile(file *ast.File) string {
    if len(file.Docs) == 0 {
        return ""
    }
    var ret string
    for _, doc := range file.Docs {
        ret += f.formatDocument(doc)
    }
    return ret
}

func (f *Formatter) origin(tk *token.Token) string {
    if tk == nil {
        return ""
    }
    if f.existsComment {
        return tk.Origin
    }
    return f.tokenToOriginMap[tk]
}

func (f *Formatter) formatDocument(n *ast.DocumentNode) string {
    return f.origin(n.Start) + f.formatNode(n.Body) + f.origin(n.End)
}

func (f *Formatter) formatNull(n *ast.NullNode) string {
    return f.origin(n.Token) + f.formatCommentGroup(n.Comment)
}

func (f *Formatter) formatString(n *ast.StringNode) string {
    return f.origin(n.Token) + f.formatCommentGroup(n.Comment)
}

func (f *Formatter) formatInteger(n *ast.IntegerNode) string {
    return f.origin(n.Token) + f.formatCommentGroup(n.Comment)
}

func (f *Formatter) formatFloat(n *ast.FloatNode) string {
    return f.origin(n.Token) + f.formatCommentGroup(n.Comment)
}

func (f *Formatter) formatBool(n *ast.BoolNode) string {
    return f.origin(n.Token) + f.formatCommentGroup(n.Comment)
}

func (f *Formatter) formatInfinity(n *ast.InfinityNode) string {
    return f.origin(n.Token) + f.formatCommentGroup(n.Comment)
}

func (f *Formatter) formatNan(n *ast.NanNode) string {
    return f.origin(n.Token) + f.formatCommentGroup(n.Comment)
}

func (f *Formatter) formatLiteral(n *ast.LiteralNode) string {
    return f.origin(n.Start) + f.formatCommentGroup(n.Comment) + f.origin(n.Value.Token)
}

func (f *Formatter) formatMergeKey(n *ast.MergeKeyNode) string {
    return f.origin(n.Token)
}

func (f *Formatter) formatMappingValue(n *ast.MappingValueNode) string {
    return f.formatCommentGroup(n.Comment) +
        f.origin(n.Key.GetToken()) + ":" + f.formatCommentGroup(n.Key.GetComment()) + f.formatNode(n.Value) +
        f.formatCommentGroup(n.FootComment)
}

func (f *Formatter) formatDirective(n *ast.DirectiveNode) string {
    ret := f.origin(n.Start) + f.formatNode(n.Name)
    for _, val := range n.Values {
        ret += f.formatNode(val)
    }
    return ret
}

func (f *Formatter) formatMapping(n *ast.MappingNode) string {
    var ret string
    if n.IsFlowStyle {
        ret = f.origin(n.Start)
    } else {
        ret += f.formatCommentGroup(n.Comment)
    }
    for _, value := range n.Values {
        if value.CollectEntry != nil {
            ret += f.origin(value.CollectEntry)
        }
        ret += f.formatMappingValue(value)
    }
    if n.IsFlowStyle {
        ret += f.origin(n.End)
        ret += f.formatCommentGroup(n.Comment)
    }
    return ret
}

func (f *Formatter) formatTag(n *ast.TagNode) string {
    return f.origin(n.Start) + f.formatNode(n.Value)
}

func (f *Formatter) formatMappingKey(n *ast.MappingKeyNode) string {
    return f.origin(n.Start) + f.formatNode(n.Value)
}

func (f *Formatter) formatSequence(n *ast.SequenceNode) string {
    var ret string
    if n.IsFlowStyle {
        ret = f.origin(n.Start)
    } else {
        // add head comment.
        ret += f.formatCommentGroup(n.Comment)
    }
    for _, entry := range n.Entries {
        ret += f.formatNode(entry)
    }
    if n.IsFlowStyle {
        ret += f.origin(n.End)
        ret += f.formatCommentGroup(n.Comment)
    }
    ret += f.formatCommentGroup(n.FootComment)
    return ret
}

func (f *Formatter) formatSequenceEntry(n *ast.SequenceEntryNode) string {
    return f.formatCommentGroup(n.HeadComment) + f.origin(n.Start) + f.formatCommentGroup(n.LineComment) + f.formatNode(n.Value)
}

func (f *Formatter) formatAnchor(n *ast.AnchorNode) string {
    return f.origin(n.Start) + f.formatNode(n.Name) + f.formatNode(n.Value)
}

func (f *Formatter) formatAlias(n *ast.AliasNode) string {
    if f.anchorNodeMap != nil {
        anchorName := n.Value.GetToken().Value
        node := f.anchorNodeMap[anchorName]
        if node != nil {
            formatted := f.formatNode(node)
            // If formatted text contains newline characters, indentation needs to be considered.
            if strings.Contains(formatted, "\n") {
                // If the first character is not a newline, the first line should be output without indentation.
                isIgnoredFirstLine := !strings.HasPrefix(formatted, "\n")
                formatted = f.addIndentSpace(n.GetToken().Position.IndentNum, formatted, isIgnoredFirstLine)
            }
            return formatted
        }
    }
    return f.origin(n.Start) + f.formatNode(n.Value)
}

func (f *Formatter) formatNode(n ast.Node) string {
    switch nn := n.(type) {
    case *ast.DocumentNode:
        return f.formatDocument(nn)
    case *ast.NullNode:
        return f.formatNull(nn)
    case *ast.BoolNode:
        return f.formatBool(nn)
    case *ast.IntegerNode:
        return f.formatInteger(nn)
    case *ast.FloatNode:
        return f.formatFloat(nn)
    case *ast.StringNode:
        return f.formatString(nn)
    case *ast.InfinityNode:
        return f.formatInfinity(nn)
    case *ast.NanNode:
        return f.formatNan(nn)
    case *ast.LiteralNode:
        return f.formatLiteral(nn)
    case *ast.DirectiveNode:
        return f.formatDirective(nn)
    case *ast.TagNode:
        return f.formatTag(nn)
    case *ast.MappingNode:
        return f.formatMapping(nn)
    case *ast.MappingKeyNode:
        return f.formatMappingKey(nn)
    case *ast.MappingValueNode:
        return f.formatMappingValue(nn)
    case *ast.MergeKeyNode:
        return f.formatMergeKey(nn)
    case *ast.SequenceNode:
        return f.formatSequence(nn)
    case *ast.SequenceEntryNode:
        return f.formatSequenceEntry(nn)
    case *ast.AnchorNode:
        return f.formatAnchor(nn)
    case *ast.AliasNode:
        return f.formatAlias(nn)
    }
    return ""
}

func (f *Formatter) formatCommentGroup(g *ast.CommentGroupNode) string {
    if g == nil {
        return ""
    }
    var ret string
    for _, cm := range g.Comments {
        ret += f.formatComment(cm)
    }
    return ret
}

func (f *Formatter) formatComment(n *ast.CommentNode) string {
    if n == nil {
        return ""
    }
    return n.Token.Origin
}

// nolint: unused
func (f *Formatter) formatIndent(col int) string {
    if col <= 1 {
        return ""
    }
    return strings.Repeat(" ", col-1)
}

func (f *Formatter) trimNewLineCharPrefix(v string) string {
    return strings.TrimLeftFunc(v, func(r rune) bool {
        return r == '\n' || r == '\r'
    })
}

func (f *Formatter) trimSpacePrefix(v string) string {
    return strings.TrimLeftFunc(v, func(r rune) bool {
        return r == ' '
    })
}

func (f *Formatter) trimIndentSpace(trimIndentNum int, v string) string {
    if trimIndentNum == 0 {
        return v
    }
    lines := strings.Split(normalizeNewLineChars(v), "\n")
    out := make([]string, 0, len(lines))
    for _, line := range lines {
        var cnt int
        out = append(out, strings.TrimLeftFunc(line, func(r rune) bool {
            cnt++
            return r == ' ' && cnt <= trimIndentNum
        }))
    }
    return strings.Join(out, "\n")
}

func (f *Formatter) addIndentSpace(indentNum int, v string, isIgnoredFirstLine bool) string {
    if indentNum == 0 {
        return v
    }
    indent := strings.Repeat(" ", indentNum)
    lines := strings.Split(normalizeNewLineChars(v), "\n")
    out := make([]string, 0, len(lines))
    for idx, line := range lines {
        if line == "" || (isIgnoredFirstLine && idx == 0) {
            out = append(out, line)
            continue
        }
        out = append(out, indent+line)
    }
    return strings.Join(out, "\n")
}

// normalizeNewLineChars normalize CRLF and CR to LF.
func normalizeNewLineChars(v string) string {
    return strings.ReplaceAll(strings.ReplaceAll(v, "\r\n", "\n"), "\r", "\n")
}
23 vendor/github.com/goccy/go-yaml/lexer/lexer.go (generated, vendored, new file)
@@ -0,0 +1,23 @@
package lexer

import (
    "io"

    "github.com/goccy/go-yaml/scanner"
    "github.com/goccy/go-yaml/token"
)

// Tokenize split to token instances from string
func Tokenize(src string) token.Tokens {
    var s scanner.Scanner
    s.Init(src)
    var tokens token.Tokens
    for {
        subTokens, err := s.Scan()
        if err == io.EOF {
            break
        }
        tokens.Add(subTokens...)
    }
    return tokens
}
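`Tokenize` simply drives the scanner until `io.EOF` and accumulates the tokens it produces. A minimal usage sketch (not part of the vendored diff), assuming `token.Tokens` is a slice of `*token.Token` with the `Value` and `Position` fields used elsewhere in this diff:

```go
package main

import (
    "fmt"

    "github.com/goccy/go-yaml/lexer"
)

func main() {
    // Tokenize a tiny YAML document and dump each token with its source position.
    tokens := lexer.Tokenize("title: faketunes\nbitrate: 1411\n")
    for _, tk := range tokens {
        fmt.Printf("%d:%d %q\n", tk.Position.Line, tk.Position.Column, tk.Value)
    }
}
```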
352 vendor/github.com/goccy/go-yaml/option.go (generated, vendored, new file)
@@ -0,0 +1,352 @@
package yaml

import (
    "context"
    "io"
    "reflect"

    "github.com/goccy/go-yaml/ast"
)

// DecodeOption functional option type for Decoder
type DecodeOption func(d *Decoder) error

// ReferenceReaders pass to Decoder that reference to anchor defined by passed readers
func ReferenceReaders(readers ...io.Reader) DecodeOption {
    return func(d *Decoder) error {
        d.referenceReaders = append(d.referenceReaders, readers...)
        return nil
    }
}

// ReferenceFiles pass to Decoder that reference to anchor defined by passed files
func ReferenceFiles(files ...string) DecodeOption {
    return func(d *Decoder) error {
        d.referenceFiles = files
        return nil
    }
}

// ReferenceDirs pass to Decoder that reference to anchor defined by files under the passed dirs
func ReferenceDirs(dirs ...string) DecodeOption {
    return func(d *Decoder) error {
        d.referenceDirs = dirs
        return nil
    }
}

// RecursiveDir search yaml file recursively from passed dirs by ReferenceDirs option
func RecursiveDir(isRecursive bool) DecodeOption {
    return func(d *Decoder) error {
        d.isRecursiveDir = isRecursive
        return nil
    }
}

// Validator set StructValidator instance to Decoder
func Validator(v StructValidator) DecodeOption {
    return func(d *Decoder) error {
        d.validator = v
        return nil
    }
}

// Strict enable DisallowUnknownField
func Strict() DecodeOption {
    return func(d *Decoder) error {
        d.disallowUnknownField = true
        return nil
    }
}

// DisallowUnknownField causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func DisallowUnknownField() DecodeOption {
    return func(d *Decoder) error {
        d.disallowUnknownField = true
        return nil
    }
}

// AllowFieldPrefixes, when paired with [DisallowUnknownField], allows fields
// with the specified prefixes to bypass the unknown field check.
func AllowFieldPrefixes(prefixes ...string) DecodeOption {
    return func(d *Decoder) error {
        d.allowedFieldPrefixes = append(d.allowedFieldPrefixes, prefixes...)
        return nil
    }
}

// AllowDuplicateMapKey ignore syntax error when mapping keys that are duplicates.
func AllowDuplicateMapKey() DecodeOption {
    return func(d *Decoder) error {
        d.allowDuplicateMapKey = true
        return nil
    }
}

// UseOrderedMap can be interpreted as a map,
// and uses MapSlice ( ordered map ) aggressively if there is no type specification
func UseOrderedMap() DecodeOption {
    return func(d *Decoder) error {
        d.useOrderedMap = true
        return nil
    }
}

// UseJSONUnmarshaler if neither `BytesUnmarshaler` nor `InterfaceUnmarshaler` is implemented
// and `UnmarshalJSON([]byte) error` is implemented, convert the argument from `YAML` to `JSON` and then call it.
func UseJSONUnmarshaler() DecodeOption {
    return func(d *Decoder) error {
        d.useJSONUnmarshaler = true
        return nil
    }
}

// CustomUnmarshaler overrides any decoding process for the type specified in generics.
//
// NOTE: If RegisterCustomUnmarshaler and CustomUnmarshaler of DecodeOption are specified for the same type,
// the CustomUnmarshaler specified in DecodeOption takes precedence.
func CustomUnmarshaler[T any](unmarshaler func(*T, []byte) error) DecodeOption {
    return func(d *Decoder) error {
        var typ *T
        d.customUnmarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}, b []byte) error {
            return unmarshaler(v.(*T), b)
        }
        return nil
    }
}

// CustomUnmarshalerContext overrides any decoding process for the type specified in generics.
// Similar to CustomUnmarshaler, but allows passing a context to the unmarshaler function.
func CustomUnmarshalerContext[T any](unmarshaler func(context.Context, *T, []byte) error) DecodeOption {
    return func(d *Decoder) error {
        var typ *T
        d.customUnmarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}, b []byte) error {
            return unmarshaler(ctx, v.(*T), b)
        }
        return nil
    }
}

// EncodeOption functional option type for Encoder
type EncodeOption func(e *Encoder) error

// Indent change indent number
func Indent(spaces int) EncodeOption {
    return func(e *Encoder) error {
        e.indentNum = spaces
        return nil
    }
}

// IndentSequence causes sequence values to be indented the same value as Indent
func IndentSequence(indent bool) EncodeOption {
    return func(e *Encoder) error {
        e.indentSequence = indent
        return nil
    }
}

// UseSingleQuote determines if single or double quotes should be preferred for strings.
func UseSingleQuote(sq bool) EncodeOption {
    return func(e *Encoder) error {
        e.singleQuote = sq
        return nil
    }
}

// Flow encoding by flow style
func Flow(isFlowStyle bool) EncodeOption {
    return func(e *Encoder) error {
        e.isFlowStyle = isFlowStyle
        return nil
    }
}

// WithSmartAnchor when multiple map values share the same pointer,
// an anchor is automatically assigned to the first occurrence, and aliases are used for subsequent elements.
// The map key name is used as the anchor name by default.
// If key names conflict, a suffix is automatically added to avoid collisions.
// This is an experimental feature and cannot be used simultaneously with anchor tags.
func WithSmartAnchor() EncodeOption {
    return func(e *Encoder) error {
        e.enableSmartAnchor = true
        return nil
    }
}

// UseLiteralStyleIfMultiline causes encoding multiline strings with a literal syntax,
// no matter what characters they include
func UseLiteralStyleIfMultiline(useLiteralStyleIfMultiline bool) EncodeOption {
    return func(e *Encoder) error {
        e.useLiteralStyleIfMultiline = useLiteralStyleIfMultiline
        return nil
    }
}

// JSON encode in JSON format
func JSON() EncodeOption {
    return func(e *Encoder) error {
        e.isJSONStyle = true
        e.isFlowStyle = true
        return nil
    }
}

// MarshalAnchor call back if encoder find an anchor during encoding
func MarshalAnchor(callback func(*ast.AnchorNode, interface{}) error) EncodeOption {
    return func(e *Encoder) error {
        e.anchorCallback = callback
        return nil
    }
}

// UseJSONMarshaler if neither `BytesMarshaler` nor `InterfaceMarshaler`
// nor `encoding.TextMarshaler` is implemented and `MarshalJSON()([]byte, error)` is implemented,
// call `MarshalJSON` to convert the returned `JSON` to `YAML` for processing.
func UseJSONMarshaler() EncodeOption {
    return func(e *Encoder) error {
        e.useJSONMarshaler = true
        return nil
    }
}

// CustomMarshaler overrides any encoding process for the type specified in generics.
//
// NOTE: If type T implements MarshalYAML for pointer receiver, the type specified in CustomMarshaler must be *T.
// If RegisterCustomMarshaler and CustomMarshaler of EncodeOption are specified for the same type,
// the CustomMarshaler specified in EncodeOption takes precedence.
func CustomMarshaler[T any](marshaler func(T) ([]byte, error)) EncodeOption {
    return func(e *Encoder) error {
        var typ T
        e.customMarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}) ([]byte, error) {
            return marshaler(v.(T))
        }
        return nil
    }
}

// CustomMarshalerContext overrides any encoding process for the type specified in generics.
// Similar to CustomMarshaler, but allows passing a context to the marshaler function.
func CustomMarshalerContext[T any](marshaler func(context.Context, T) ([]byte, error)) EncodeOption {
    return func(e *Encoder) error {
        var typ T
        e.customMarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}) ([]byte, error) {
            return marshaler(ctx, v.(T))
        }
        return nil
    }
}

// AutoInt automatically converts floating-point numbers to integers when the fractional part is zero.
// For example, a value of 1.0 will be encoded as 1.
func AutoInt() EncodeOption {
    return func(e *Encoder) error {
        e.autoInt = true
        return nil
    }
}

// OmitEmpty behaves in the same way as the interpretation of the omitempty tag in the encoding/json library.
// set on all the fields.
// In the current implementation, the omitempty tag is not implemented in the same way as encoding/json,
// so please specify this option if you expect the same behavior.
func OmitEmpty() EncodeOption {
    return func(e *Encoder) error {
        e.omitEmpty = true
        return nil
    }
}

// OmitZero forces the encoder to assume an `omitzero` struct tag is
// set on all the fields. See `Marshal` commentary for the `omitzero` tag logic.
func OmitZero() EncodeOption {
    return func(e *Encoder) error {
        e.omitZero = true
        return nil
    }
}

// CommentPosition type of the position for comment.
type CommentPosition int

const (
    CommentHeadPosition CommentPosition = CommentPosition(iota)
    CommentLinePosition
    CommentFootPosition
)

func (p CommentPosition) String() string {
    switch p {
    case CommentHeadPosition:
        return "Head"
    case CommentLinePosition:
        return "Line"
    case CommentFootPosition:
        return "Foot"
    default:
        return ""
    }
}

// LineComment create a one-line comment for CommentMap.
func LineComment(text string) *Comment {
    return &Comment{
        Texts:    []string{text},
        Position: CommentLinePosition,
    }
}

// HeadComment create a multiline comment for CommentMap.
func HeadComment(texts ...string) *Comment {
    return &Comment{
        Texts:    texts,
        Position: CommentHeadPosition,
    }
}

// FootComment create a multiline comment for CommentMap.
func FootComment(texts ...string) *Comment {
    return &Comment{
        Texts:    texts,
        Position: CommentFootPosition,
    }
}

// Comment raw data for comment.
type Comment struct {
    Texts    []string
    Position CommentPosition
}

// CommentMap map of the position of the comment and the comment information.
type CommentMap map[string][]*Comment

// WithComment add a comment using the location and text information given in the CommentMap.
func WithComment(cm CommentMap) EncodeOption {
    return func(e *Encoder) error {
        commentMap := map[*Path][]*Comment{}
        for k, v := range cm {
            path, err := PathString(k)
            if err != nil {
                return err
            }
            commentMap[path] = v
        }
        e.commentMap = commentMap
        return nil
    }
}

// CommentToMap apply the position and content of comments in a YAML document to a CommentMap.
func CommentToMap(cm CommentMap) DecodeOption {
    return func(d *Decoder) error {
        if cm == nil {
            return ErrInvalidCommentMapValue
        }
        d.toCommentMap = cm
        return nil
    }
}
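Everything in this file is a functional option consumed by the Decoder and Encoder constructors defined in decode.go and encode.go, which are suppressed above. A hedged usage sketch (not part of the vendored diff), assuming those constructors accept the options variadically and using a hypothetical config struct in place of faketunes' real one:

```go
package main

import (
    "bytes"
    "fmt"
    "os"

    "github.com/goccy/go-yaml"
)

// Hypothetical config shape for illustration; the application's real config
// struct lives in its own code, not in this vendored file.
type config struct {
    Library  string `yaml:"library"`
    CacheDir string `yaml:"cache_dir"`
}

func main() {
    src := []byte("library: /library\ncache_dir: /var/cache/faketunes\n")

    // DisallowUnknownField turns config-key typos into decode errors instead
    // of silently ignoring them.
    var cfg config
    dec := yaml.NewDecoder(bytes.NewReader(src), yaml.DisallowUnknownField())
    if err := dec.Decode(&cfg); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", cfg)

    // On the encode side, Indent and IndentSequence control the output layout.
    enc := yaml.NewEncoder(os.Stdout, yaml.Indent(2), yaml.IndentSequence(true))
    defer enc.Close()
    if err := enc.Encode(cfg); err != nil {
        panic(err)
    }
}
```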
28 vendor/github.com/goccy/go-yaml/parser/color.go (generated, vendored, new file)
@@ -0,0 +1,28 @@
package parser

import "fmt"

const (
    colorFgHiBlack int = iota + 90
    colorFgHiRed
    colorFgHiGreen
    colorFgHiYellow
    colorFgHiBlue
    colorFgHiMagenta
    colorFgHiCyan
)

var colorTable = []int{
    colorFgHiRed,
    colorFgHiGreen,
    colorFgHiYellow,
    colorFgHiBlue,
    colorFgHiMagenta,
    colorFgHiCyan,
}

func colorize(idx int, content string) string {
    colorIdx := idx % len(colorTable)
    color := colorTable[colorIdx]
    return fmt.Sprintf("\x1b[1;%dm", color) + content + "\x1b[22;0m"
}
187 vendor/github.com/goccy/go-yaml/parser/context.go (generated, vendored, new file)
@@ -0,0 +1,187 @@
package parser

import (
    "fmt"
    "strings"

    "github.com/goccy/go-yaml/token"
)

// context context at parsing
type context struct {
    tokenRef *tokenRef
    path     string
    isFlow   bool
}

type tokenRef struct {
    tokens []*Token
    size   int
    idx    int
}

var pathSpecialChars = []string{
    "$", "*", ".", "[", "]",
}

func containsPathSpecialChar(path string) bool {
    for _, char := range pathSpecialChars {
        if strings.Contains(path, char) {
            return true
        }
    }
    return false
}

func normalizePath(path string) string {
    if containsPathSpecialChar(path) {
        return fmt.Sprintf("'%s'", path)
    }
    return path
}

func (c *context) currentToken() *Token {
    if c.tokenRef.idx >= c.tokenRef.size {
        return nil
    }
    return c.tokenRef.tokens[c.tokenRef.idx]
}

func (c *context) isComment() bool {
    return c.currentToken().Type() == token.CommentType
}

func (c *context) nextToken() *Token {
    if c.tokenRef.idx+1 >= c.tokenRef.size {
        return nil
    }
    return c.tokenRef.tokens[c.tokenRef.idx+1]
}

func (c *context) nextNotCommentToken() *Token {
    for i := c.tokenRef.idx + 1; i < c.tokenRef.size; i++ {
        tk := c.tokenRef.tokens[i]
        if tk.Type() == token.CommentType {
            continue
        }
        return tk
    }
    return nil
}

func (c *context) isTokenNotFound() bool {
    return c.currentToken() == nil
}

func (c *context) withGroup(g *TokenGroup) *context {
    ctx := *c
    ctx.tokenRef = &tokenRef{
        tokens: g.Tokens,
        size:   len(g.Tokens),
    }
    return &ctx
}

func (c *context) withChild(path string) *context {
    ctx := *c
    ctx.path = c.path + "." + normalizePath(path)
    return &ctx
}

func (c *context) withIndex(idx uint) *context {
    ctx := *c
    ctx.path = c.path + "[" + fmt.Sprint(idx) + "]"
    return &ctx
}

func (c *context) withFlow(isFlow bool) *context {
    ctx := *c
    ctx.isFlow = isFlow
    return &ctx
}

func newContext() *context {
    return &context{
        path: "$",
    }
}

func (c *context) goNext() {
    ref := c.tokenRef
    if ref.size <= ref.idx+1 {
        ref.idx = ref.size
    } else {
        ref.idx++
    }
}

func (c *context) next() bool {
    return c.tokenRef.idx < c.tokenRef.size
}

func (c *context) insertNullToken(tk *Token) *Token {
    nullToken := c.createImplicitNullToken(tk)
    c.insertToken(nullToken)
    c.goNext()

    return nullToken
}

func (c *context) addNullValueToken(tk *Token) *Token {
    nullToken := c.createImplicitNullToken(tk)
    rawTk := nullToken.RawToken()

    // add space for map or sequence value.
    rawTk.Position.Column++

    c.addToken(nullToken)
    c.goNext()

    return nullToken
}

func (c *context) createImplicitNullToken(base *Token) *Token {
    pos := *(base.RawToken().Position)
    pos.Column++
    tk := token.New("null", " null", &pos)
    tk.Type = token.ImplicitNullType
    return &Token{Token: tk}
}

func (c *context) insertToken(tk *Token) {
    ref := c.tokenRef
    idx := ref.idx
    if ref.size < idx {
        return
    }
    if ref.size == idx {
        curToken := ref.tokens[ref.size-1]
        tk.RawToken().Next = curToken.RawToken()
        curToken.RawToken().Prev = tk.RawToken()

        ref.tokens = append(ref.tokens, tk)
        ref.size = len(ref.tokens)
        return
    }

    curToken := ref.tokens[idx]
    tk.RawToken().Next = curToken.RawToken()
    curToken.RawToken().Prev = tk.RawToken()

    ref.tokens = append(ref.tokens[:idx+1], ref.tokens[idx:]...)
    ref.tokens[idx] = tk
    ref.size = len(ref.tokens)
}

func (c *context) addToken(tk *Token) {
    ref := c.tokenRef
    lastTk := ref.tokens[ref.size-1]
    if lastTk.Group != nil {
        lastTk = lastTk.Group.Last()
    }
    lastTk.RawToken().Next = tk.RawToken()
    tk.RawToken().Prev = lastTk.RawToken()

    ref.tokens = append(ref.tokens, tk)
    ref.size = len(ref.tokens)
}
257 vendor/github.com/goccy/go-yaml/parser/node.go (generated, vendored, new file)
@@ -0,0 +1,257 @@
package parser

import (
    "fmt"

    "github.com/goccy/go-yaml/ast"
    "github.com/goccy/go-yaml/internal/errors"
    "github.com/goccy/go-yaml/token"
)

func newMappingNode(ctx *context, tk *Token, isFlow bool, values ...*ast.MappingValueNode) (*ast.MappingNode, error) {
    node := ast.Mapping(tk.RawToken(), isFlow, values...)
    node.SetPath(ctx.path)
    return node, nil
}

func newMappingValueNode(ctx *context, colonTk, entryTk *Token, key ast.MapKeyNode, value ast.Node) (*ast.MappingValueNode, error) {
    node := ast.MappingValue(colonTk.RawToken(), key, value)
    node.SetPath(ctx.path)
    node.CollectEntry = entryTk.RawToken()
    if key.GetToken().Position.Line == value.GetToken().Position.Line {
        // originally key was commented, but now that null value has been added, value must be commented.
        if err := setLineComment(ctx, value, colonTk); err != nil {
            return nil, err
        }
        // set line comment by colonTk or entryTk.
        if err := setLineComment(ctx, value, entryTk); err != nil {
            return nil, err
        }
    } else {
        if err := setLineComment(ctx, key, colonTk); err != nil {
            return nil, err
        }
        // set line comment by colonTk or entryTk.
        if err := setLineComment(ctx, key, entryTk); err != nil {
            return nil, err
        }
    }
    return node, nil
}

func newMappingKeyNode(ctx *context, tk *Token) (*ast.MappingKeyNode, error) {
    node := ast.MappingKey(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newAnchorNode(ctx *context, tk *Token) (*ast.AnchorNode, error) {
    node := ast.Anchor(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newAliasNode(ctx *context, tk *Token) (*ast.AliasNode, error) {
    node := ast.Alias(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newDirectiveNode(ctx *context, tk *Token) (*ast.DirectiveNode, error) {
    node := ast.Directive(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newMergeKeyNode(ctx *context, tk *Token) (*ast.MergeKeyNode, error) {
    node := ast.MergeKey(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newNullNode(ctx *context, tk *Token) (*ast.NullNode, error) {
    node := ast.Null(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newBoolNode(ctx *context, tk *Token) (*ast.BoolNode, error) {
    node := ast.Bool(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newIntegerNode(ctx *context, tk *Token) (*ast.IntegerNode, error) {
    node := ast.Integer(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newFloatNode(ctx *context, tk *Token) (*ast.FloatNode, error) {
    node := ast.Float(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newInfinityNode(ctx *context, tk *Token) (*ast.InfinityNode, error) {
    node := ast.Infinity(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newNanNode(ctx *context, tk *Token) (*ast.NanNode, error) {
    node := ast.Nan(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newStringNode(ctx *context, tk *Token) (*ast.StringNode, error) {
    node := ast.String(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newLiteralNode(ctx *context, tk *Token) (*ast.LiteralNode, error) {
    node := ast.Literal(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newTagNode(ctx *context, tk *Token) (*ast.TagNode, error) {
    node := ast.Tag(tk.RawToken())
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newSequenceNode(ctx *context, tk *Token, isFlow bool) (*ast.SequenceNode, error) {
    node := ast.Sequence(tk.RawToken(), isFlow)
    node.SetPath(ctx.path)
    if err := setLineComment(ctx, node, tk); err != nil {
        return nil, err
    }
    return node, nil
}

func newTagDefaultScalarValueNode(ctx *context, tag *token.Token) (ast.ScalarNode, error) {
    pos := *(tag.Position)
    pos.Column++

    var (
        tk   *Token
        node ast.ScalarNode
    )
    switch token.ReservedTagKeyword(tag.Value) {
    case token.IntegerTag:
        tk = &Token{Token: token.New("0", "0", &pos)}
        n, err := newIntegerNode(ctx, tk)
        if err != nil {
            return nil, err
        }
        node = n
    case token.FloatTag:
        tk = &Token{Token: token.New("0", "0", &pos)}
        n, err := newFloatNode(ctx, tk)
        if err != nil {
            return nil, err
        }
        node = n
    case token.StringTag, token.BinaryTag, token.TimestampTag:
        tk = &Token{Token: token.New("", "", &pos)}
        n, err := newStringNode(ctx, tk)
        if err != nil {
            return nil, err
        }
        node = n
    case token.BooleanTag:
        tk = &Token{Token: token.New("false", "false", &pos)}
        n, err := newBoolNode(ctx, tk)
        if err != nil {
            return nil, err
        }
        node = n
    case token.NullTag:
        tk = &Token{Token: token.New("null", "null", &pos)}
        n, err := newNullNode(ctx, tk)
        if err != nil {
            return nil, err
        }
        node = n
    default:
        return nil, errors.ErrSyntax(fmt.Sprintf("cannot assign default value for %q tag", tag.Value), tag)
    }
    ctx.insertToken(tk)
    ctx.goNext()
    return node, nil
}

func setLineComment(ctx *context, node ast.Node, tk *Token) error {
    if tk == nil || tk.LineComment == nil {
        return nil
    }
    comment := ast.CommentGroup([]*token.Token{tk.LineComment})
    comment.SetPath(ctx.path)
    if err := node.SetComment(comment); err != nil {
        return err
    }
    return nil
}

func setHeadComment(cm *ast.CommentGroupNode, value ast.Node) error {
    if cm == nil {
        return nil
    }
    switch n := value.(type) {
    case *ast.MappingNode:
        if len(n.Values) != 0 && value.GetComment() == nil {
            cm.SetPath(n.Values[0].GetPath())
            return n.Values[0].SetComment(cm)
        }
    case *ast.MappingValueNode:
        cm.SetPath(n.GetPath())
        return n.SetComment(cm)
    }
    cm.SetPath(value.GetPath())
    return value.SetComment(cm)
}
12
vendor/github.com/goccy/go-yaml/parser/option.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
package parser

// Option represents parser's option.
type Option func(p *parser)

// AllowDuplicateMapKey allow the use of keys with the same name in the same map,
// but by default, this is not permitted.
func AllowDuplicateMapKey() Option {
	return func(p *parser) {
		p.allowDuplicateMapKey = true
	}
}
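As a quick illustration of how a functional option like the one above is typically consumed (a minimal sketch, not part of this commit; it assumes this vendored version's `parser.ParseBytes` accepts variadic `parser.Option` values):

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml/parser"
)

func main() {
	// Without the option, a duplicated key would normally be rejected.
	src := []byte("artist: A\nartist: B\n")

	// Mode 0, plus the AllowDuplicateMapKey option defined above
	// (passing options variadically is assumed to be supported here).
	file, err := parser.ParseBytes(src, 0, parser.AllowDuplicateMapKey())
	if err != nil {
		panic(err)
	}
	fmt.Println(len(file.Docs)) // number of parsed documents
}
```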
1330
vendor/github.com/goccy/go-yaml/parser/parser.go
generated
vendored
Normal file
File diff suppressed because it is too large
746
vendor/github.com/goccy/go-yaml/parser/token.go
generated
vendored
Normal file
@@ -0,0 +1,746 @@
(746 added lines of vendored token-grouping code omitted: TokenGroupType, Token, TokenGroup, CreateGroupedTokens and the createLineComment/Literal/Anchor/ScalarTag/MapKey/Directive/Document token-group helpers plus isScalarType, isNotMapKeyType and isFlowType.)
835
vendor/github.com/goccy/go-yaml/path.go
generated
vendored
Normal file
@@ -0,0 +1,835 @@
(835 added lines of vendored YAMLPath code omitted: PathString, Path with Read/ReadNode/Filter/FilterFile/FilterNode/MergeFrom*/ReplaceWith*/AnnotateSource, PathBuilder, and the root/selector/index/indexAll/recursive path node implementations.)
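For orientation, the file collapsed above implements the YAMLPath query API (`PathString`, `Path.Read`, `PathBuilder`, and the selector/index/recursive nodes). A minimal usage sketch built only on the `PathString` and `Path.Read` signatures declared in that file (illustrative, not part of this commit):

```go
package main

import (
	"fmt"
	"strings"

	yaml "github.com/goccy/go-yaml"
)

func main() {
	// $ is the root, . selects a map child, [n] indexes a sequence,
	// following the YAMLPath rules documented on PathString.
	p, err := yaml.PathString("$.library.artists[0]")
	if err != nil {
		panic(err)
	}

	src := "library:\n  artists:\n    - Artist Name\n"
	var artist string
	// Path.Read extracts the addressed node and unmarshals it into artist.
	if err := p.Read(strings.NewReader(src), &artist); err != nil {
		panic(err)
	}
	fmt.Println(artist) // Artist Name
}
```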
83
vendor/github.com/goccy/go-yaml/printer/color.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
// This source inspired by https://github.com/fatih/color.
package printer

import (
	"fmt"
	"strings"
)

type ColorAttribute int

const (
	ColorReset ColorAttribute = iota
	ColorBold
	ColorFaint
	ColorItalic
	ColorUnderline
	ColorBlinkSlow
	ColorBlinkRapid
	ColorReverseVideo
	ColorConcealed
	ColorCrossedOut
)

const (
	ColorFgHiBlack ColorAttribute = iota + 90
	ColorFgHiRed
	ColorFgHiGreen
	ColorFgHiYellow
	ColorFgHiBlue
	ColorFgHiMagenta
	ColorFgHiCyan
	ColorFgHiWhite
)

const (
	ColorResetBold ColorAttribute = iota + 22
	ColorResetItalic
	ColorResetUnderline
	ColorResetBlinking

	ColorResetReversed
	ColorResetConcealed
	ColorResetCrossedOut
)

const escape = "\x1b"

var colorResetMap = map[ColorAttribute]ColorAttribute{
	ColorBold:         ColorResetBold,
	ColorFaint:        ColorResetBold,
	ColorItalic:       ColorResetItalic,
	ColorUnderline:    ColorResetUnderline,
	ColorBlinkSlow:    ColorResetBlinking,
	ColorBlinkRapid:   ColorResetBlinking,
	ColorReverseVideo: ColorResetReversed,
	ColorConcealed:    ColorResetConcealed,
	ColorCrossedOut:   ColorResetCrossedOut,
}

func format(attrs ...ColorAttribute) string {
	format := make([]string, 0, len(attrs))
	for _, attr := range attrs {
		format = append(format, fmt.Sprint(attr))
	}
	return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";"))
}

func unformat(attrs ...ColorAttribute) string {
	format := make([]string, len(attrs))
	for _, attr := range attrs {
		v := fmt.Sprint(ColorReset)
		reset, exists := colorResetMap[attr]
		if exists {
			v = fmt.Sprint(reset)
		}
		format = append(format, v)
	}
	return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";"))
}

func colorize(msg string, attrs ...ColorAttribute) string {
	return format(attrs...) + msg + unformat(attrs...)
}
353
vendor/github.com/goccy/go-yaml/printer/printer.go
generated
vendored
Normal file
353
vendor/github.com/goccy/go-yaml/printer/printer.go
generated
vendored
Normal file
@@ -0,0 +1,353 @@
|
|||||||
|
package printer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/goccy/go-yaml/ast"
|
||||||
|
"github.com/goccy/go-yaml/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Property additional property set for each the token
|
||||||
|
type Property struct {
|
||||||
|
Prefix string
|
||||||
|
Suffix string
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintFunc returns property instance
|
||||||
|
type PrintFunc func() *Property
|
||||||
|
|
||||||
|
// Printer create text from token collection or ast
|
||||||
|
type Printer struct {
|
||||||
|
LineNumber bool
|
||||||
|
LineNumberFormat func(num int) string
|
||||||
|
MapKey PrintFunc
|
||||||
|
Anchor PrintFunc
|
||||||
|
Alias PrintFunc
|
||||||
|
Bool PrintFunc
|
||||||
|
String PrintFunc
|
||||||
|
Number PrintFunc
|
||||||
|
Comment PrintFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultLineNumberFormat(num int) string {
|
||||||
|
return fmt.Sprintf("%2d | ", num)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) property(tk *token.Token) *Property {
|
||||||
|
prop := &Property{}
|
||||||
|
switch tk.PreviousType() {
|
||||||
|
case token.AnchorType:
|
||||||
|
if p.Anchor != nil {
|
||||||
|
return p.Anchor()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
case token.AliasType:
|
||||||
|
if p.Alias != nil {
|
||||||
|
return p.Alias()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
switch tk.NextType() {
|
||||||
|
case token.MappingValueType:
|
||||||
|
if p.MapKey != nil {
|
||||||
|
return p.MapKey()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
switch tk.Type {
|
||||||
|
case token.BoolType:
|
||||||
|
if p.Bool != nil {
|
||||||
|
return p.Bool()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
case token.AnchorType:
|
||||||
|
if p.Anchor != nil {
|
||||||
|
return p.Anchor()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
case token.AliasType:
|
||||||
|
if p.Anchor != nil {
|
||||||
|
return p.Alias()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
case token.StringType, token.SingleQuoteType, token.DoubleQuoteType:
|
||||||
|
if p.String != nil {
|
||||||
|
return p.String()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
case token.IntegerType, token.FloatType:
|
||||||
|
if p.Number != nil {
|
||||||
|
return p.Number()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
case token.CommentType:
|
||||||
|
if p.Comment != nil {
|
||||||
|
return p.Comment()
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return prop
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintTokens create text from token collection
|
||||||
|
func (p *Printer) PrintTokens(tokens token.Tokens) string {
|
||||||
|
if len(tokens) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if p.LineNumber {
|
||||||
|
if p.LineNumberFormat == nil {
|
||||||
|
p.LineNumberFormat = defaultLineNumberFormat
|
||||||
|
}
|
||||||
|
}
|
||||||
|
texts := []string{}
|
||||||
|
lineNumber := tokens[0].Position.Line
|
||||||
|
for _, tk := range tokens {
|
||||||
|
lines := strings.Split(tk.Origin, "\n")
|
||||||
|
prop := p.property(tk)
|
||||||
|
header := ""
|
||||||
|
if p.LineNumber {
|
||||||
|
header = p.LineNumberFormat(lineNumber)
|
||||||
|
}
|
||||||
|
if len(lines) == 1 {
|
||||||
|
line := prop.Prefix + lines[0] + prop.Suffix
|
||||||
|
if len(texts) == 0 {
|
||||||
|
texts = append(texts, header+line)
|
||||||
|
lineNumber++
|
||||||
|
} else {
|
||||||
|
text := texts[len(texts)-1]
|
||||||
|
texts[len(texts)-1] = text + line
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for idx, src := range lines {
|
||||||
|
if p.LineNumber {
|
||||||
|
header = p.LineNumberFormat(lineNumber)
|
||||||
|
}
|
||||||
|
line := prop.Prefix + src + prop.Suffix
|
||||||
|
if idx == 0 {
|
||||||
|
if len(texts) == 0 {
|
||||||
|
texts = append(texts, header+line)
|
||||||
|
lineNumber++
|
||||||
|
} else {
|
||||||
|
text := texts[len(texts)-1]
|
||||||
|
texts[len(texts)-1] = text + line
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
texts = append(texts, fmt.Sprintf("%s%s", header, line))
|
||||||
|
lineNumber++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strings.Join(texts, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrintNode create text from ast.Node
|
||||||
|
func (p *Printer) PrintNode(node ast.Node) []byte {
|
||||||
|
return []byte(fmt.Sprintf("%+v\n", node))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) setDefaultColorSet() {
|
||||||
|
p.Bool = func() *Property {
|
||||||
|
return &Property{
|
||||||
|
Prefix: format(ColorFgHiMagenta),
|
||||||
|
Suffix: format(ColorReset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.Number = func() *Property {
|
||||||
|
return &Property{
|
||||||
|
Prefix: format(ColorFgHiMagenta),
|
||||||
|
Suffix: format(ColorReset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.MapKey = func() *Property {
|
||||||
|
return &Property{
|
||||||
|
Prefix: format(ColorFgHiCyan),
|
||||||
|
Suffix: format(ColorReset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.Anchor = func() *Property {
|
||||||
|
return &Property{
|
||||||
|
Prefix: format(ColorFgHiYellow),
|
||||||
|
Suffix: format(ColorReset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.Alias = func() *Property {
|
||||||
|
return &Property{
|
||||||
|
Prefix: format(ColorFgHiYellow),
|
||||||
|
Suffix: format(ColorReset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.String = func() *Property {
|
||||||
|
return &Property{
|
||||||
|
Prefix: format(ColorFgHiGreen),
|
||||||
|
Suffix: format(ColorReset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.Comment = func() *Property {
|
||||||
|
return &Property{
|
||||||
|
Prefix: format(ColorFgHiBlack),
|
||||||
|
Suffix: format(ColorReset),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) PrintErrorMessage(msg string, isColored bool) string {
|
||||||
|
if isColored {
|
||||||
|
return fmt.Sprintf("%s%s%s",
|
||||||
|
format(ColorFgHiRed),
|
||||||
|
msg,
|
||||||
|
format(ColorReset),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return msg
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) removeLeftSideNewLineChar(src string) string {
|
||||||
|
return strings.TrimLeft(strings.TrimLeft(strings.TrimLeft(src, "\r"), "\n"), "\r\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) removeRightSideNewLineChar(src string) string {
|
||||||
|
return strings.TrimRight(strings.TrimRight(strings.TrimRight(src, "\r"), "\n"), "\r\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) removeRightSideWhiteSpaceChar(src string) string {
|
||||||
|
return p.removeRightSideNewLineChar(strings.TrimRight(src, " "))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) newLineCount(s string) int {
|
||||||
|
src := []rune(s)
|
||||||
|
size := len(src)
|
||||||
|
cnt := 0
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
c := src[i]
|
||||||
|
switch c {
|
||||||
|
case '\r':
|
||||||
|
if i+1 < size && src[i+1] == '\n' {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
cnt++
|
||||||
|
case '\n':
|
||||||
|
cnt++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cnt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) isNewLineLastChar(s string) bool {
|
||||||
|
for i := len(s) - 1; i > 0; i-- {
|
||||||
|
c := s[i]
|
||||||
|
switch c {
|
||||||
|
case ' ':
|
||||||
|
continue
|
||||||
|
case '\n', '\r':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) printBeforeTokens(tk *token.Token, minLine, extLine int) token.Tokens {
|
||||||
|
for tk.Prev != nil {
|
||||||
|
if tk.Prev.Position.Line < minLine {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
tk = tk.Prev
|
||||||
|
}
|
||||||
|
minTk := tk.Clone()
|
||||||
|
if minTk.Prev != nil {
|
||||||
|
// add white spaces to minTk by prev token
|
||||||
|
prev := minTk.Prev
|
||||||
|
whiteSpaceLen := len(prev.Origin) - len(strings.TrimRight(prev.Origin, " "))
|
||||||
|
minTk.Origin = strings.Repeat(" ", whiteSpaceLen) + minTk.Origin
|
||||||
|
}
|
||||||
|
minTk.Origin = p.removeLeftSideNewLineChar(minTk.Origin)
|
||||||
|
tokens := token.Tokens{minTk}
|
||||||
|
tk = minTk.Next
|
||||||
|
for tk != nil && tk.Position.Line <= extLine {
|
||||||
|
clonedTk := tk.Clone()
|
||||||
|
tokens.Add(clonedTk)
|
||||||
|
tk = clonedTk.Next
|
||||||
|
}
|
||||||
|
lastTk := tokens[len(tokens)-1]
|
||||||
|
trimmedOrigin := p.removeRightSideWhiteSpaceChar(lastTk.Origin)
|
||||||
|
suffix := lastTk.Origin[len(trimmedOrigin):]
|
||||||
|
lastTk.Origin = trimmedOrigin
|
||||||
|
|
||||||
|
if lastTk.Next != nil && len(suffix) > 1 {
|
||||||
|
next := lastTk.Next.Clone()
|
||||||
|
// add suffix to header of next token
|
||||||
|
if suffix[0] == '\n' || suffix[0] == '\r' {
|
||||||
|
suffix = suffix[1:]
|
||||||
|
}
|
||||||
|
next.Origin = suffix + next.Origin
|
||||||
|
lastTk.Next = next
|
||||||
|
}
|
||||||
|
return tokens
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) printAfterTokens(tk *token.Token, maxLine int) token.Tokens {
|
||||||
|
tokens := token.Tokens{}
|
||||||
|
if tk == nil {
|
||||||
|
return tokens
|
||||||
|
}
|
||||||
|
if tk.Position.Line > maxLine {
|
||||||
|
return tokens
|
||||||
|
}
|
||||||
|
minTk := tk.Clone()
|
||||||
|
minTk.Origin = p.removeLeftSideNewLineChar(minTk.Origin)
|
||||||
|
tokens.Add(minTk)
|
||||||
|
tk = minTk.Next
|
||||||
|
for tk != nil && tk.Position.Line <= maxLine {
|
||||||
|
clonedTk := tk.Clone()
|
||||||
|
tokens.Add(clonedTk)
|
||||||
|
tk = clonedTk.Next
|
||||||
|
}
|
||||||
|
return tokens
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) setupErrorTokenFormat(annotateLine int, isColored bool) {
|
||||||
|
prefix := func(annotateLine, num int) string {
|
||||||
|
if annotateLine == num {
|
||||||
|
return fmt.Sprintf("> %2d | ", num)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(" %2d | ", num)
|
||||||
|
}
|
||||||
|
p.LineNumber = true
|
||||||
|
p.LineNumberFormat = func(num int) string {
|
||||||
|
if isColored {
|
||||||
|
return colorize(prefix(annotateLine, num), ColorBold, ColorFgHiWhite)
|
||||||
|
}
|
||||||
|
return prefix(annotateLine, num)
|
||||||
|
}
|
||||||
|
if isColored {
|
||||||
|
p.setDefaultColorSet()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Printer) PrintErrorToken(tk *token.Token, isColored bool) string {
|
||||||
|
errToken := tk
|
||||||
|
curLine := tk.Position.Line
|
||||||
|
curExtLine := curLine + p.newLineCount(p.removeLeftSideNewLineChar(tk.Origin))
|
||||||
|
if p.isNewLineLastChar(tk.Origin) {
|
||||||
|
// if last character ( exclude white space ) is new line character, ignore it.
|
||||||
|
curExtLine--
|
||||||
|
}
|
||||||
|
|
||||||
|
minLine := int(math.Max(float64(curLine-3), 1))
|
||||||
|
maxLine := curExtLine + 3
|
||||||
|
p.setupErrorTokenFormat(curLine, isColored)
|
||||||
|
|
||||||
|
beforeTokens := p.printBeforeTokens(tk, minLine, curExtLine)
|
||||||
|
lastTk := beforeTokens[len(beforeTokens)-1]
|
||||||
|
afterTokens := p.printAfterTokens(lastTk.Next, maxLine)
|
||||||
|
|
||||||
|
beforeSource := p.PrintTokens(beforeTokens)
|
||||||
|
prefixSpaceNum := len(fmt.Sprintf(" %2d | ", curLine))
|
||||||
|
annotateLine := strings.Repeat(" ", prefixSpaceNum+errToken.Position.Column-1) + "^"
|
||||||
|
afterSource := p.PrintTokens(afterTokens)
|
||||||
|
return fmt.Sprintf("%s\n%s\n%s", beforeSource, annotateLine, afterSource)
|
||||||
|
}
|
||||||
452
vendor/github.com/goccy/go-yaml/scanner/context.go
generated
vendored
Normal file
452
vendor/github.com/goccy/go-yaml/scanner/context.go
generated
vendored
Normal file
@@ -0,0 +1,452 @@
|
|||||||
|
package scanner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/goccy/go-yaml/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Context context at scanning
|
||||||
|
type Context struct {
|
||||||
|
idx int
|
||||||
|
size int
|
||||||
|
notSpaceCharPos int
|
||||||
|
notSpaceOrgCharPos int
|
||||||
|
src []rune
|
||||||
|
buf []rune
|
||||||
|
obuf []rune
|
||||||
|
tokens token.Tokens
|
||||||
|
mstate *MultiLineState
|
||||||
|
}
|
||||||
|
|
||||||
|
type MultiLineState struct {
|
||||||
|
opt string
|
||||||
|
firstLineIndentColumn int
|
||||||
|
prevLineIndentColumn int
|
||||||
|
lineIndentColumn int
|
||||||
|
lastNotSpaceOnlyLineIndentColumn int
|
||||||
|
spaceOnlyIndentColumn int
|
||||||
|
foldedNewLine bool
|
||||||
|
isRawFolded bool
|
||||||
|
isLiteral bool
|
||||||
|
isFolded bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ctxPool = sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return createContext()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func createContext() *Context {
|
||||||
|
return &Context{
|
||||||
|
idx: 0,
|
||||||
|
tokens: token.Tokens{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newContext(src []rune) *Context {
|
||||||
|
ctx, _ := ctxPool.Get().(*Context)
|
||||||
|
ctx.reset(src)
|
||||||
|
return ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) release() {
|
||||||
|
ctxPool.Put(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) clear() {
|
||||||
|
c.resetBuffer()
|
||||||
|
c.mstate = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) reset(src []rune) {
|
||||||
|
c.idx = 0
|
||||||
|
c.size = len(src)
|
||||||
|
c.src = src
|
||||||
|
c.tokens = c.tokens[:0]
|
||||||
|
c.resetBuffer()
|
||||||
|
c.mstate = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) resetBuffer() {
|
||||||
|
c.buf = c.buf[:0]
|
||||||
|
c.obuf = c.obuf[:0]
|
||||||
|
c.notSpaceCharPos = 0
|
||||||
|
c.notSpaceOrgCharPos = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) breakMultiLine() {
|
||||||
|
c.mstate = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) getMultiLineState() *MultiLineState {
|
||||||
|
return c.mstate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) setLiteral(lastDelimColumn int, opt string) {
|
||||||
|
mstate := &MultiLineState{
|
||||||
|
isLiteral: true,
|
||||||
|
opt: opt,
|
||||||
|
}
|
||||||
|
indent := firstLineIndentColumnByOpt(opt)
|
||||||
|
if indent > 0 {
|
||||||
|
mstate.firstLineIndentColumn = lastDelimColumn + indent
|
||||||
|
}
|
||||||
|
c.mstate = mstate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) setFolded(lastDelimColumn int, opt string) {
|
||||||
|
mstate := &MultiLineState{
|
||||||
|
isFolded: true,
|
||||||
|
opt: opt,
|
||||||
|
}
|
||||||
|
indent := firstLineIndentColumnByOpt(opt)
|
||||||
|
if indent > 0 {
|
||||||
|
mstate.firstLineIndentColumn = lastDelimColumn + indent
|
||||||
|
}
|
||||||
|
c.mstate = mstate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) setRawFolded(column int) {
|
||||||
|
mstate := &MultiLineState{
|
||||||
|
isRawFolded: true,
|
||||||
|
}
|
||||||
|
mstate.updateIndentColumn(column)
|
||||||
|
c.mstate = mstate
|
||||||
|
}
|
||||||
|
|
||||||
|
func firstLineIndentColumnByOpt(opt string) int {
|
||||||
|
opt = strings.TrimPrefix(opt, "-")
|
||||||
|
opt = strings.TrimPrefix(opt, "+")
|
||||||
|
opt = strings.TrimSuffix(opt, "-")
|
||||||
|
opt = strings.TrimSuffix(opt, "+")
|
||||||
|
i, _ := strconv.ParseInt(opt, 10, 64)
|
||||||
|
return int(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) lastDelimColumn() int {
|
||||||
|
if s.firstLineIndentColumn == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return s.firstLineIndentColumn - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) updateIndentColumn(column int) {
|
||||||
|
if s.firstLineIndentColumn == 0 {
|
||||||
|
s.firstLineIndentColumn = column
|
||||||
|
}
|
||||||
|
if s.lineIndentColumn == 0 {
|
||||||
|
s.lineIndentColumn = column
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) updateSpaceOnlyIndentColumn(column int) {
|
||||||
|
if s.firstLineIndentColumn != 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.spaceOnlyIndentColumn = column
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) validateIndentAfterSpaceOnly(column int) error {
|
||||||
|
if s.firstLineIndentColumn != 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.spaceOnlyIndentColumn > column {
|
||||||
|
return errors.New("invalid number of indent is specified after space only")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) validateIndentColumn() error {
|
||||||
|
if firstLineIndentColumnByOpt(s.opt) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.firstLineIndentColumn > s.lineIndentColumn {
|
||||||
|
return errors.New("invalid number of indent is specified in the multi-line header")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) updateNewLineState() {
|
||||||
|
s.prevLineIndentColumn = s.lineIndentColumn
|
||||||
|
if s.lineIndentColumn != 0 {
|
||||||
|
s.lastNotSpaceOnlyLineIndentColumn = s.lineIndentColumn
|
||||||
|
}
|
||||||
|
s.foldedNewLine = true
|
||||||
|
s.lineIndentColumn = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) isIndentColumn(column int) bool {
|
||||||
|
if s.firstLineIndentColumn == 0 {
|
||||||
|
return column == 1
|
||||||
|
}
|
||||||
|
return s.firstLineIndentColumn > column
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) addIndent(ctx *Context, column int) {
|
||||||
|
if s.firstLineIndentColumn == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the first line of the document has already been evaluated, the number is treated as the threshold, since the `firstLineIndentColumn` is a positive number.
|
||||||
|
if column < s.firstLineIndentColumn {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// `c.foldedNewLine` is a variable that is set to true for every newline.
|
||||||
|
if !s.isLiteral && s.foldedNewLine {
|
||||||
|
s.foldedNewLine = false
|
||||||
|
}
|
||||||
|
// Since addBuf ignore space character, add to the buffer directly.
|
||||||
|
ctx.buf = append(ctx.buf, ' ')
|
||||||
|
ctx.notSpaceCharPos = len(ctx.buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateNewLineInFolded if Folded or RawFolded context and the content on the current line starts at the same column as the previous line,
|
||||||
|
// treat the new-line-char as a space.
|
||||||
|
func (s *MultiLineState) updateNewLineInFolded(ctx *Context, column int) {
|
||||||
|
if s.isLiteral {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Folded or RawFolded.
|
||||||
|
|
||||||
|
if !s.foldedNewLine {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
lastChar rune
|
||||||
|
prevLastChar rune
|
||||||
|
)
|
||||||
|
if len(ctx.buf) != 0 {
|
||||||
|
lastChar = ctx.buf[len(ctx.buf)-1]
|
||||||
|
}
|
||||||
|
if len(ctx.buf) > 1 {
|
||||||
|
prevLastChar = ctx.buf[len(ctx.buf)-2]
|
||||||
|
}
|
||||||
|
if s.lineIndentColumn == s.prevLineIndentColumn {
|
||||||
|
// ---
|
||||||
|
// >
|
||||||
|
// a
|
||||||
|
// b
|
||||||
|
if lastChar == '\n' {
|
||||||
|
ctx.buf[len(ctx.buf)-1] = ' '
|
||||||
|
}
|
||||||
|
} else if s.prevLineIndentColumn == 0 && s.lastNotSpaceOnlyLineIndentColumn == column {
|
||||||
|
// if previous line is indent-space and new-line-char only, prevLineIndentColumn is zero.
|
||||||
|
// In this case, last new-line-char is removed.
|
||||||
|
// ---
|
||||||
|
// >
|
||||||
|
// a
|
||||||
|
//
|
||||||
|
// b
|
||||||
|
if lastChar == '\n' && prevLastChar == '\n' {
|
||||||
|
ctx.buf = ctx.buf[:len(ctx.buf)-1]
|
||||||
|
ctx.notSpaceCharPos = len(ctx.buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.foldedNewLine = false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) hasTrimAllEndNewlineOpt() bool {
|
||||||
|
return strings.HasPrefix(s.opt, "-") || strings.HasSuffix(s.opt, "-") || s.isRawFolded
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *MultiLineState) hasKeepAllEndNewlineOpt() bool {
|
||||||
|
return strings.HasPrefix(s.opt, "+") || strings.HasSuffix(s.opt, "+")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) addToken(tk *token.Token) {
|
||||||
|
if tk == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.tokens = append(c.tokens, tk)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) addBuf(r rune) {
|
||||||
|
if len(c.buf) == 0 && (r == ' ' || r == '\t') {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.buf = append(c.buf, r)
|
||||||
|
if r != ' ' && r != '\t' {
|
||||||
|
c.notSpaceCharPos = len(c.buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) addBufWithTab(r rune) {
|
||||||
|
if len(c.buf) == 0 && r == ' ' {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.buf = append(c.buf, r)
|
||||||
|
if r != ' ' {
|
||||||
|
c.notSpaceCharPos = len(c.buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) addOriginBuf(r rune) {
|
||||||
|
c.obuf = append(c.obuf, r)
|
||||||
|
if r != ' ' && r != '\t' {
|
||||||
|
c.notSpaceOrgCharPos = len(c.obuf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) removeRightSpaceFromBuf() {
|
||||||
|
trimmedBuf := c.obuf[:c.notSpaceOrgCharPos]
|
||||||
|
buflen := len(trimmedBuf)
|
||||||
|
diff := len(c.obuf) - buflen
|
||||||
|
if diff > 0 {
|
||||||
|
c.obuf = c.obuf[:buflen]
|
||||||
|
c.buf = c.bufferedSrc()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) isEOS() bool {
|
||||||
|
return len(c.src)-1 <= c.idx
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) isNextEOS() bool {
|
||||||
|
return len(c.src) <= c.idx+1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) next() bool {
|
||||||
|
return c.idx < c.size
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) source(s, e int) string {
|
||||||
|
return string(c.src[s:e])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) previousChar() rune {
|
||||||
|
if c.idx > 0 {
|
||||||
|
return c.src[c.idx-1]
|
||||||
|
}
|
||||||
|
return rune(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) currentChar() rune {
|
||||||
|
if c.size > c.idx {
|
||||||
|
return c.src[c.idx]
|
||||||
|
}
|
||||||
|
return rune(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) nextChar() rune {
|
||||||
|
if c.size > c.idx+1 {
|
||||||
|
return c.src[c.idx+1]
|
||||||
|
}
|
||||||
|
return rune(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) repeatNum(r rune) int {
|
||||||
|
cnt := 0
|
||||||
|
for i := c.idx; i < c.size; i++ {
|
||||||
|
if c.src[i] == r {
|
||||||
|
cnt++
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return cnt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) progress(num int) {
|
||||||
|
c.idx += num
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) existsBuffer() bool {
|
||||||
|
return len(c.bufferedSrc()) != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) isMultiLine() bool {
|
||||||
|
return c.mstate != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) bufferedSrc() []rune {
|
||||||
|
src := c.buf[:c.notSpaceCharPos]
|
||||||
|
if c.isMultiLine() {
|
||||||
|
mstate := c.getMultiLineState()
|
||||||
|
// remove end '\n' character and trailing empty lines.
|
||||||
|
// https://yaml.org/spec/1.2.2/#8112-block-chomping-indicator
|
||||||
|
if mstate.hasTrimAllEndNewlineOpt() {
|
||||||
|
// If the '-' flag is specified, all trailing newline characters will be removed.
|
||||||
|
src = []rune(strings.TrimRight(string(src), "\n"))
|
||||||
|
} else if !mstate.hasKeepAllEndNewlineOpt() {
|
||||||
|
// Normally, all but one of the trailing newline characters are removed.
|
||||||
|
var newLineCharCount int
|
||||||
|
for i := len(src) - 1; i >= 0; i-- {
|
||||||
|
if src[i] == '\n' {
|
||||||
|
newLineCharCount++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
removedNewLineCharCount := newLineCharCount - 1
|
||||||
|
for removedNewLineCharCount > 0 {
|
||||||
|
src = []rune(strings.TrimSuffix(string(src), "\n"))
|
||||||
|
removedNewLineCharCount--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the text ends with a space character, remove all of them.
|
||||||
|
if mstate.hasTrimAllEndNewlineOpt() {
|
||||||
|
src = []rune(strings.TrimRight(string(src), " "))
|
||||||
|
}
|
||||||
|
if string(src) == "\n" {
|
||||||
|
// If the content consists only of a newline,
|
||||||
|
// it can be considered as the document ending without any specified value,
|
||||||
|
// so it is treated as an empty string.
|
||||||
|
src = []rune{}
|
||||||
|
}
|
||||||
|
if mstate.hasKeepAllEndNewlineOpt() && len(src) == 0 {
|
||||||
|
src = []rune{'\n'}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return src
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) bufferedToken(pos *token.Position) *token.Token {
|
||||||
|
if c.idx == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
source := c.bufferedSrc()
|
||||||
|
if len(source) == 0 {
|
||||||
|
c.buf = c.buf[:0] // clear value's buffer only.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var tk *token.Token
|
||||||
|
if c.isMultiLine() {
|
||||||
|
tk = token.String(string(source), string(c.obuf), pos)
|
||||||
|
} else {
|
||||||
|
tk = token.New(string(source), string(c.obuf), pos)
|
||||||
|
}
|
||||||
|
c.setTokenTypeByPrevTag(tk)
|
||||||
|
c.resetBuffer()
|
||||||
|
return tk
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) setTokenTypeByPrevTag(tk *token.Token) {
|
||||||
|
lastTk := c.lastToken()
|
||||||
|
if lastTk == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if lastTk.Type != token.TagType {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tag := token.ReservedTagKeyword(lastTk.Value)
|
||||||
|
if _, exists := token.ReservedTagKeywordMap[tag]; !exists {
|
||||||
|
tk.Type = token.StringType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Context) lastToken() *token.Token {
|
||||||
|
if len(c.tokens) != 0 {
|
||||||
|
return c.tokens[len(c.tokens)-1]
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
17
vendor/github.com/goccy/go-yaml/scanner/error.go
generated
vendored
Normal file
17
vendor/github.com/goccy/go-yaml/scanner/error.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
package scanner
|
||||||
|
|
||||||
|
import "github.com/goccy/go-yaml/token"
|
||||||
|
|
||||||
|
type InvalidTokenError struct {
|
||||||
|
Token *token.Token
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *InvalidTokenError) Error() string {
|
||||||
|
return e.Token.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
func ErrInvalidToken(tk *token.Token) *InvalidTokenError {
|
||||||
|
return &InvalidTokenError{
|
||||||
|
Token: tk,
|
||||||
|
}
|
||||||
|
}
|
||||||
1536
vendor/github.com/goccy/go-yaml/scanner/scanner.go
generated
vendored
Normal file
1536
vendor/github.com/goccy/go-yaml/scanner/scanner.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
113
vendor/github.com/goccy/go-yaml/stdlib_quote.go
generated
vendored
Normal file
113
vendor/github.com/goccy/go-yaml/stdlib_quote.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
// Copied and trimmed down from https://github.com/golang/go/blob/e3769299cd3484e018e0e2a6e1b95c2b18ce4f41/src/strconv/quote.go
|
||||||
|
// We want to use the standard library's private "quoteWith" function rather than write our own so that we get robust unicode support.
|
||||||
|
// Every private function called by quoteWith was copied.
|
||||||
|
// There are 2 modifications to simplify the code:
|
||||||
|
// 1. The unicode.IsPrint function was substituted for the custom implementation of IsPrint
|
||||||
|
// 2. All code paths reachable only when ASCIIonly or grphicOnly are set to true were removed.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
lowerhex = "0123456789abcdef"
|
||||||
|
)
|
||||||
|
|
||||||
|
func quoteWith(s string, quote byte) string {
|
||||||
|
return string(appendQuotedWith(make([]byte, 0, 3*len(s)/2), s, quote))
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendQuotedWith(buf []byte, s string, quote byte) []byte {
|
||||||
|
// Often called with big strings, so preallocate. If there's quoting,
|
||||||
|
// this is conservative but still helps a lot.
|
||||||
|
if cap(buf)-len(buf) < len(s) {
|
||||||
|
nBuf := make([]byte, len(buf), len(buf)+1+len(s)+1)
|
||||||
|
copy(nBuf, buf)
|
||||||
|
buf = nBuf
|
||||||
|
}
|
||||||
|
buf = append(buf, quote)
|
||||||
|
for width := 0; len(s) > 0; s = s[width:] {
|
||||||
|
r := rune(s[0])
|
||||||
|
width = 1
|
||||||
|
if r >= utf8.RuneSelf {
|
||||||
|
r, width = utf8.DecodeRuneInString(s)
|
||||||
|
}
|
||||||
|
if width == 1 && r == utf8.RuneError {
|
||||||
|
buf = append(buf, `\x`...)
|
||||||
|
buf = append(buf, lowerhex[s[0]>>4])
|
||||||
|
buf = append(buf, lowerhex[s[0]&0xF])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
buf = appendEscapedRune(buf, r, quote)
|
||||||
|
}
|
||||||
|
buf = append(buf, quote)
|
||||||
|
return buf
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendEscapedRune(buf []byte, r rune, quote byte) []byte {
|
||||||
|
var runeTmp [utf8.UTFMax]byte
|
||||||
|
// goccy/go-yaml patch on top of the standard library's appendEscapedRune function.
|
||||||
|
//
|
||||||
|
// We use this to implement the YAML single-quoted string, where the only escape sequence is '', which represents a single quote.
|
||||||
|
// The below snippet from the standard library is for escaping e.g. \ with \\, which is not what we want for the single-quoted string.
|
||||||
|
//
|
||||||
|
// if r == rune(quote) || r == '\\' { // always backslashed
|
||||||
|
// buf = append(buf, '\\')
|
||||||
|
// buf = append(buf, byte(r))
|
||||||
|
// return buf
|
||||||
|
// }
|
||||||
|
if r == rune(quote) {
|
||||||
|
buf = append(buf, byte(r))
|
||||||
|
buf = append(buf, byte(r))
|
||||||
|
return buf
|
||||||
|
}
|
||||||
|
if unicode.IsPrint(r) {
|
||||||
|
n := utf8.EncodeRune(runeTmp[:], r)
|
||||||
|
buf = append(buf, runeTmp[:n]...)
|
||||||
|
return buf
|
||||||
|
}
|
||||||
|
switch r {
|
||||||
|
case '\a':
|
||||||
|
buf = append(buf, `\a`...)
|
||||||
|
case '\b':
|
||||||
|
buf = append(buf, `\b`...)
|
||||||
|
case '\f':
|
||||||
|
buf = append(buf, `\f`...)
|
||||||
|
case '\n':
|
||||||
|
buf = append(buf, `\n`...)
|
||||||
|
case '\r':
|
||||||
|
buf = append(buf, `\r`...)
|
||||||
|
case '\t':
|
||||||
|
buf = append(buf, `\t`...)
|
||||||
|
case '\v':
|
||||||
|
buf = append(buf, `\v`...)
|
||||||
|
default:
|
||||||
|
switch {
|
||||||
|
case r < ' ':
|
||||||
|
buf = append(buf, `\x`...)
|
||||||
|
buf = append(buf, lowerhex[byte(r)>>4])
|
||||||
|
buf = append(buf, lowerhex[byte(r)&0xF])
|
||||||
|
case r > utf8.MaxRune:
|
||||||
|
r = 0xFFFD
|
||||||
|
fallthrough
|
||||||
|
case r < 0x10000:
|
||||||
|
buf = append(buf, `\u`...)
|
||||||
|
for s := 12; s >= 0; s -= 4 {
|
||||||
|
buf = append(buf, lowerhex[r>>uint(s)&0xF])
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
buf = append(buf, `\U`...)
|
||||||
|
for s := 28; s >= 0; s -= 4 {
|
||||||
|
buf = append(buf, lowerhex[r>>uint(s)&0xF])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buf
|
||||||
|
}
|
||||||
128
vendor/github.com/goccy/go-yaml/struct.go
generated
vendored
Normal file
128
vendor/github.com/goccy/go-yaml/struct.go
generated
vendored
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// StructTagName tag keyword for Marshal/Unmarshal
|
||||||
|
StructTagName = "yaml"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StructField information for each the field in structure
|
||||||
|
type StructField struct {
|
||||||
|
FieldName string
|
||||||
|
RenderName string
|
||||||
|
AnchorName string
|
||||||
|
AliasName string
|
||||||
|
IsAutoAnchor bool
|
||||||
|
IsAutoAlias bool
|
||||||
|
IsOmitEmpty bool
|
||||||
|
IsOmitZero bool
|
||||||
|
IsFlow bool
|
||||||
|
IsInline bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func getTag(field reflect.StructField) string {
|
||||||
|
// If struct tag `yaml` exist, use that. If no `yaml`
|
||||||
|
// exists, but `json` does, use that and try the best to
|
||||||
|
// adhere to its rules
|
||||||
|
tag := field.Tag.Get(StructTagName)
|
||||||
|
if tag == "" {
|
||||||
|
tag = field.Tag.Get(`json`)
|
||||||
|
}
|
||||||
|
return tag
|
||||||
|
}
|
||||||
|
|
||||||
|
func structField(field reflect.StructField) *StructField {
|
||||||
|
tag := getTag(field)
|
||||||
|
fieldName := strings.ToLower(field.Name)
|
||||||
|
options := strings.Split(tag, ",")
|
||||||
|
if len(options) > 0 {
|
||||||
|
if options[0] != "" {
|
||||||
|
fieldName = options[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sf := &StructField{
|
||||||
|
FieldName: field.Name,
|
||||||
|
RenderName: fieldName,
|
||||||
|
}
|
||||||
|
if len(options) > 1 {
|
||||||
|
for _, opt := range options[1:] {
|
||||||
|
switch {
|
||||||
|
case opt == "omitempty":
|
||||||
|
sf.IsOmitEmpty = true
|
||||||
|
case opt == "omitzero":
|
||||||
|
sf.IsOmitZero = true
|
||||||
|
case opt == "flow":
|
||||||
|
sf.IsFlow = true
|
||||||
|
case opt == "inline":
|
||||||
|
sf.IsInline = true
|
||||||
|
case strings.HasPrefix(opt, "anchor"):
|
||||||
|
anchor := strings.Split(opt, "=")
|
||||||
|
if len(anchor) > 1 {
|
||||||
|
sf.AnchorName = anchor[1]
|
||||||
|
} else {
|
||||||
|
sf.IsAutoAnchor = true
|
||||||
|
}
|
||||||
|
case strings.HasPrefix(opt, "alias"):
|
||||||
|
alias := strings.Split(opt, "=")
|
||||||
|
if len(alias) > 1 {
|
||||||
|
sf.AliasName = alias[1]
|
||||||
|
} else {
|
||||||
|
sf.IsAutoAlias = true
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sf
|
||||||
|
}
|
||||||
|
|
||||||
|
func isIgnoredStructField(field reflect.StructField) bool {
|
||||||
|
if field.PkgPath != "" && !field.Anonymous {
|
||||||
|
// private field
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return getTag(field) == "-"
|
||||||
|
}
|
||||||
|
|
||||||
|
type StructFieldMap map[string]*StructField
|
||||||
|
|
||||||
|
func (m StructFieldMap) isIncludedRenderName(name string) bool {
|
||||||
|
for _, v := range m {
|
||||||
|
if !v.IsInline && v.RenderName == name {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m StructFieldMap) hasMergeProperty() bool {
|
||||||
|
for _, v := range m {
|
||||||
|
if v.IsOmitEmpty && v.IsInline && v.IsAutoAlias {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func structFieldMap(structType reflect.Type) (StructFieldMap, error) {
|
||||||
|
fieldMap := StructFieldMap{}
|
||||||
|
renderNameMap := map[string]struct{}{}
|
||||||
|
for i := 0; i < structType.NumField(); i++ {
|
||||||
|
field := structType.Field(i)
|
||||||
|
if isIgnoredStructField(field) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sf := structField(field)
|
||||||
|
if _, exists := renderNameMap[sf.RenderName]; exists {
|
||||||
|
return nil, fmt.Errorf("duplicated struct field name %s", sf.RenderName)
|
||||||
|
}
|
||||||
|
fieldMap[sf.FieldName] = sf
|
||||||
|
renderNameMap[sf.RenderName] = struct{}{}
|
||||||
|
}
|
||||||
|
return fieldMap, nil
|
||||||
|
}
|
||||||
1177
vendor/github.com/goccy/go-yaml/token/token.go
generated
vendored
Normal file
1177
vendor/github.com/goccy/go-yaml/token/token.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
13
vendor/github.com/goccy/go-yaml/validate.go
generated
vendored
Normal file
13
vendor/github.com/goccy/go-yaml/validate.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
package yaml
|
||||||
|
|
||||||
|
// StructValidator need to implement Struct method only
|
||||||
|
// ( see https://pkg.go.dev/github.com/go-playground/validator/v10#Validate.Struct )
|
||||||
|
type StructValidator interface {
|
||||||
|
Struct(interface{}) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// FieldError need to implement StructField method only
|
||||||
|
// ( see https://pkg.go.dev/github.com/go-playground/validator/v10#FieldError )
|
||||||
|
type FieldError interface {
|
||||||
|
StructField() string
|
||||||
|
}
|
||||||
357
vendor/github.com/goccy/go-yaml/yaml.go
generated
vendored
Normal file
357
vendor/github.com/goccy/go-yaml/yaml.go
generated
vendored
Normal file
@@ -0,0 +1,357 @@
|
|||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/goccy/go-yaml/ast"
|
||||||
|
"github.com/goccy/go-yaml/internal/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BytesMarshaler interface may be implemented by types to customize their
|
||||||
|
// behavior when being marshaled into a YAML document. The returned value
|
||||||
|
// is marshaled in place of the original value implementing Marshaler.
|
||||||
|
//
|
||||||
|
// If an error is returned by MarshalYAML, the marshaling procedure stops
|
||||||
|
// and returns with the provided error.
|
||||||
|
type BytesMarshaler interface {
|
||||||
|
MarshalYAML() ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesMarshalerContext interface use BytesMarshaler with context.Context.
|
||||||
|
type BytesMarshalerContext interface {
|
||||||
|
MarshalYAML(context.Context) ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InterfaceMarshaler interface has MarshalYAML compatible with github.com/go-yaml/yaml package.
|
||||||
|
type InterfaceMarshaler interface {
|
||||||
|
MarshalYAML() (interface{}, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InterfaceMarshalerContext interface use InterfaceMarshaler with context.Context.
|
||||||
|
type InterfaceMarshalerContext interface {
|
||||||
|
MarshalYAML(context.Context) (interface{}, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesUnmarshaler interface may be implemented by types to customize their
|
||||||
|
// behavior when being unmarshaled from a YAML document.
|
||||||
|
type BytesUnmarshaler interface {
|
||||||
|
UnmarshalYAML([]byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesUnmarshalerContext interface use BytesUnmarshaler with context.Context.
|
||||||
|
type BytesUnmarshalerContext interface {
|
||||||
|
UnmarshalYAML(context.Context, []byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// InterfaceUnmarshaler interface has UnmarshalYAML compatible with github.com/go-yaml/yaml package.
|
||||||
|
type InterfaceUnmarshaler interface {
|
||||||
|
UnmarshalYAML(func(interface{}) error) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// InterfaceUnmarshalerContext interface use InterfaceUnmarshaler with context.Context.
|
||||||
|
type InterfaceUnmarshalerContext interface {
|
||||||
|
UnmarshalYAML(context.Context, func(interface{}) error) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeUnmarshaler interface is similar to BytesUnmarshaler but provide related AST node instead of raw YAML source.
|
||||||
|
type NodeUnmarshaler interface {
|
||||||
|
UnmarshalYAML(ast.Node) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeUnmarshalerContext interface is similar to BytesUnmarshaler but provide related AST node instead of raw YAML source.
|
||||||
|
type NodeUnmarshalerContext interface {
|
||||||
|
UnmarshalYAML(context.Context, ast.Node) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapItem is an item in a MapSlice.
|
||||||
|
type MapItem struct {
|
||||||
|
Key, Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MapSlice encodes and decodes as a YAML map.
|
||||||
|
// The order of keys is preserved when encoding and decoding.
|
||||||
|
type MapSlice []MapItem
|
||||||
|
|
||||||
|
// ToMap convert to map[interface{}]interface{}.
|
||||||
|
func (s MapSlice) ToMap() map[interface{}]interface{} {
|
||||||
|
v := map[interface{}]interface{}{}
|
||||||
|
for _, item := range s {
|
||||||
|
v[item.Key] = item.Value
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal serializes the value provided into a YAML document. The structure
|
||||||
|
// of the generated document will reflect the structure of the value itself.
|
||||||
|
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
||||||
|
//
|
||||||
|
// Struct fields are only marshaled if they are exported (have an upper case
|
||||||
|
// first letter), and are marshaled using the field name lowercased as the
|
||||||
|
// default key. Custom keys may be defined via the "yaml" name in the field
|
||||||
|
// tag: the content preceding the first comma is used as the key, and the
|
||||||
|
// following comma-separated options are used to tweak the marshaling process.
|
||||||
|
// Conflicting names result in a runtime error.
|
||||||
|
//
|
||||||
|
// The field tag format accepted is:
|
||||||
|
//
|
||||||
|
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||||
|
//
|
||||||
|
// The following flags are currently supported:
|
||||||
|
//
|
||||||
|
// omitempty Only include the field if it's not set to the zero
|
||||||
|
// value for the type or to empty slices or maps.
|
||||||
|
// Zero valued structs will be omitted if all their public
|
||||||
|
// fields are zero, unless they implement an IsZero
|
||||||
|
// method (see the IsZeroer interface type), in which
|
||||||
|
// case the field will be included if that method returns true.
|
||||||
|
// Note that this definition is slightly different from the Go's
|
||||||
|
// encoding/json 'omitempty' definition. It combines some elements
|
||||||
|
// of 'omitempty' and 'omitzero'. See https://github.com/goccy/go-yaml/issues/695.
|
||||||
|
//
|
||||||
|
// omitzero The omitzero tag behaves in the same way as the interpretation of the omitzero tag in the encoding/json library.
|
||||||
|
// 1) If the field type has an "IsZero() bool" method, that will be used to determine whether the value is zero.
|
||||||
|
// 2) Otherwise, the value is zero if it is the zero value for its type.
|
||||||
|
//
|
||||||
|
// flow Marshal using a flow style (useful for structs,
|
||||||
|
// sequences and maps).
|
||||||
|
//
|
||||||
|
// inline Inline the field, which must be a struct or a map,
|
||||||
|
// causing all of its fields or keys to be processed as if
|
||||||
|
// they were part of the outer struct. For maps, keys must
|
||||||
|
// not conflict with the yaml keys of other struct fields.
|
||||||
|
//
|
||||||
|
// anchor Marshal with anchor. If want to define anchor name explicitly, use anchor=name style.
|
||||||
|
// Otherwise, if used 'anchor' name only, used the field name lowercased as the anchor name
|
||||||
|
//
|
||||||
|
// alias Marshal with alias. If want to define alias name explicitly, use alias=name style.
|
||||||
|
// Otherwise, If omitted alias name and the field type is pointer type,
|
||||||
|
// assigned anchor name automatically from same pointer address.
|
||||||
|
//
|
||||||
|
// In addition, if the key is "-", the field is ignored.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// type T struct {
|
||||||
|
// F int `yaml:"a,omitempty"`
|
||||||
|
// B int
|
||||||
|
// }
|
||||||
|
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||||
|
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
|
||||||
|
func Marshal(v interface{}) ([]byte, error) {
|
||||||
|
return MarshalWithOptions(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalWithOptions serializes the value provided into a YAML document with EncodeOptions.
|
||||||
|
func MarshalWithOptions(v interface{}, opts ...EncodeOption) ([]byte, error) {
|
||||||
|
return MarshalContext(context.Background(), v, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalContext serializes the value provided into a YAML document with context.Context and EncodeOptions.
|
||||||
|
func MarshalContext(ctx context.Context, v interface{}, opts ...EncodeOption) ([]byte, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := NewEncoder(&buf, opts...).EncodeContext(ctx, v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueToNode convert from value to ast.Node.
|
||||||
|
func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
node, err := NewEncoder(&buf, opts...).EncodeToNode(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal decodes the first document found within the in byte slice
|
||||||
|
// and assigns decoded values into the out value.
|
||||||
|
//
|
||||||
|
// Struct fields are only unmarshalled if they are exported (have an
|
||||||
|
// upper case first letter), and are unmarshalled using the field name
|
||||||
|
// lowercased as the default key. Custom keys may be defined via the
|
||||||
|
// "yaml" name in the field tag: the content preceding the first comma
|
||||||
|
// is used as the key, and the following comma-separated options are
|
||||||
|
// used to tweak the marshaling process (see Marshal).
|
||||||
|
// Conflicting names result in a runtime error.
|
||||||
|
//
|
||||||
|
// For example:
|
||||||
|
//
|
||||||
|
// type T struct {
|
||||||
|
// F int `yaml:"a,omitempty"`
|
||||||
|
// B int
|
||||||
|
// }
|
||||||
|
// var t T
|
||||||
|
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||||
|
//
|
||||||
|
// See the documentation of Marshal for the format of tags and a list of
|
||||||
|
// supported tag options.
|
||||||
|
func Unmarshal(data []byte, v interface{}) error {
|
||||||
|
return UnmarshalWithOptions(data, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalWithOptions decodes with DecodeOptions the first document found within the in byte slice
|
||||||
|
// and assigns decoded values into the out value.
|
||||||
|
func UnmarshalWithOptions(data []byte, v interface{}, opts ...DecodeOption) error {
|
||||||
|
return UnmarshalContext(context.Background(), data, v, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalContext decodes with context.Context and DecodeOptions.
|
||||||
|
func UnmarshalContext(ctx context.Context, data []byte, v interface{}, opts ...DecodeOption) error {
|
||||||
|
dec := NewDecoder(bytes.NewBuffer(data), opts...)
|
||||||
|
if err := dec.DecodeContext(ctx, v); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeToValue converts node to the value pointed to by v.
|
||||||
|
func NodeToValue(node ast.Node, v interface{}, opts ...DecodeOption) error {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := NewDecoder(&buf, opts...).DecodeFromNode(node, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FormatError is a utility function that takes advantage of the metadata
|
||||||
|
// stored in the errors returned by this package's parser.
|
||||||
|
//
|
||||||
|
// If the second argument `colored` is true, the error message is colorized.
|
||||||
|
// If the third argument `inclSource` is true, the error message will
|
||||||
|
// contain snippets of the YAML source that was used.
|
||||||
|
func FormatError(e error, colored, inclSource bool) string {
|
||||||
|
var yamlErr Error
|
||||||
|
if errors.As(e, &yamlErr) {
|
||||||
|
return yamlErr.FormatError(colored, inclSource)
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// YAMLToJSON convert YAML bytes to JSON.
|
||||||
|
func YAMLToJSON(bytes []byte) ([]byte, error) {
|
||||||
|
var v interface{}
|
||||||
|
if err := UnmarshalWithOptions(bytes, &v, UseOrderedMap()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out, err := MarshalWithOptions(v, JSON())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSONToYAML convert JSON bytes to YAML.
|
||||||
|
func JSONToYAML(bytes []byte) ([]byte, error) {
|
||||||
|
var v interface{}
|
||||||
|
if err := UnmarshalWithOptions(bytes, &v, UseOrderedMap()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out, err := Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
globalCustomMarshalerMu sync.Mutex
|
||||||
|
globalCustomUnmarshalerMu sync.Mutex
|
||||||
|
globalCustomMarshalerMap = map[reflect.Type]func(context.Context, interface{}) ([]byte, error){}
|
||||||
|
globalCustomUnmarshalerMap = map[reflect.Type]func(context.Context, interface{}, []byte) error{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// RegisterCustomMarshaler overrides any encoding process for the type specified in generics.
|
||||||
|
// If you want to switch the behavior for each encoder, use `CustomMarshaler` defined as EncodeOption.
|
||||||
|
//
|
||||||
|
// NOTE: If type T implements MarshalYAML for pointer receiver, the type specified in RegisterCustomMarshaler must be *T.
|
||||||
|
// If RegisterCustomMarshaler and CustomMarshaler of EncodeOption are specified for the same type,
|
||||||
|
// the CustomMarshaler specified in EncodeOption takes precedence.
|
||||||
|
func RegisterCustomMarshaler[T any](marshaler func(T) ([]byte, error)) {
|
||||||
|
globalCustomMarshalerMu.Lock()
|
||||||
|
defer globalCustomMarshalerMu.Unlock()
|
||||||
|
|
||||||
|
var typ T
|
||||||
|
globalCustomMarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}) ([]byte, error) {
|
||||||
|
return marshaler(v.(T))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterCustomMarshalerContext overrides any encoding process for the type specified in generics.
|
||||||
|
// Similar to RegisterCustomMarshalerContext, but allows passing a context to the unmarshaler function.
|
||||||
|
func RegisterCustomMarshalerContext[T any](marshaler func(context.Context, T) ([]byte, error)) {
|
||||||
|
globalCustomMarshalerMu.Lock()
|
||||||
|
defer globalCustomMarshalerMu.Unlock()
|
||||||
|
|
||||||
|
var typ T
|
||||||
|
globalCustomMarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}) ([]byte, error) {
|
||||||
|
return marshaler(ctx, v.(T))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterCustomUnmarshaler overrides any decoding process for the type specified in generics.
|
||||||
|
// If you want to switch the behavior for each decoder, use `CustomUnmarshaler` defined as DecodeOption.
|
||||||
|
//
|
||||||
|
// NOTE: If RegisterCustomUnmarshaler and CustomUnmarshaler of DecodeOption are specified for the same type,
|
||||||
|
// the CustomUnmarshaler specified in DecodeOption takes precedence.
|
||||||
|
func RegisterCustomUnmarshaler[T any](unmarshaler func(*T, []byte) error) {
|
||||||
|
globalCustomUnmarshalerMu.Lock()
|
||||||
|
defer globalCustomUnmarshalerMu.Unlock()
|
||||||
|
|
||||||
|
var typ *T
|
||||||
|
globalCustomUnmarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}, b []byte) error {
|
||||||
|
return unmarshaler(v.(*T), b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterCustomUnmarshalerContext overrides any decoding process for the type specified in generics.
|
||||||
|
// Similar to RegisterCustomUnmarshalerContext, but allows passing a context to the unmarshaler function.
|
||||||
|
func RegisterCustomUnmarshalerContext[T any](unmarshaler func(context.Context, *T, []byte) error) {
|
||||||
|
globalCustomUnmarshalerMu.Lock()
|
||||||
|
defer globalCustomUnmarshalerMu.Unlock()
|
||||||
|
|
||||||
|
var typ *T
|
||||||
|
globalCustomUnmarshalerMap[reflect.TypeOf(typ)] = func(ctx context.Context, v interface{}, b []byte) error {
|
||||||
|
return unmarshaler(ctx, v.(*T), b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawMessage is a raw encoded YAML value. It implements [BytesMarshaler] and
|
||||||
|
// [BytesUnmarshaler] and can be used to delay YAML decoding or precompute a YAML
|
||||||
|
// encoding.
|
||||||
|
// It also implements [json.Marshaler] and [json.Unmarshaler].
|
||||||
|
//
|
||||||
|
// This is similar to [json.RawMessage] in the stdlib.
|
||||||
|
type RawMessage []byte
|
||||||
|
|
||||||
|
func (m RawMessage) MarshalYAML() ([]byte, error) {
|
||||||
|
if m == nil {
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *RawMessage) UnmarshalYAML(dt []byte) error {
|
||||||
|
if m == nil {
|
||||||
|
return errors.New("yaml.RawMessage: UnmarshalYAML on nil pointer")
|
||||||
|
}
|
||||||
|
*m = append((*m)[0:0], dt...)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *RawMessage) UnmarshalJSON(b []byte) error {
|
||||||
|
return m.UnmarshalYAML(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m RawMessage) MarshalJSON() ([]byte, error) {
|
||||||
|
return YAMLToJSON(m)
|
||||||
|
}
|
||||||
75
vendor/github.com/hanwen/go-fuse/v2/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,75 @@
Adam Goode <agoode@google.com>
Adam H. Leventhal <adam.leventhal@gmail.com>
Alex Fishman <alex@fuse-t.org>
Amir Hardon <ahardon@gmail.com>
Andrew Chambers <ac@acha.ninja>
Brandon Duffany <brandon@buildbuddy.io>
C.U <github@wmchris.de>
Chris Marget <cmarget@mutualink.net>
Daniel Martí <mvdan@mvdan.cc>
Dmitriy Smotrov <dsxack@gmail.com>
Dustin Oprea <myselfasunder@gmail.com>
Ed Schouten <ed.schouten@prodrive-technologies.com>
Eliot Courtney <edcourtney@google.com>
Fazlul Shahriar <fshahriar@gmail.com>
Frederick Akalin <akalin@gmail.com>
Garret Kelly <gdk@google.com>
Glonee <glonee@foxmail.com>
Google Inc.
Grant Monroe <grant@tnarg.com>
Haitao Li <lihaitao@gmail.com>
Han-Wen Nienhuys <hanwenn@gmail.com>
Henry Wang <henwang@amazon.com>
Ivan Krasin <imkrasin@gmail.com>
Ivan Volosyuk <ivan.volosyuk@gmail.com>
Jakob Unterwurzacher <jakobunt@gmail.com>
James D. Nurmi <james@abneptis.com>
Jan Pfeifer <janpf@google.com>
Jeff <leterip@me.com>
Jeff Hodges <jeff@somethingsimilar.com>
Jille Timmermans <jille@quis.cx>
Johannes Brüderl <johannes.bruederl@gmail.com>
Jonathon Reinhart <Jonathon.Reinhart@gmail.com>
Kaoet Ibe <kaoet.ibe@outlook.com>
Kirill Smelkov <kirr@nexedi.com>
Kohei Tokunaga <ktokunaga.mail@gmail.com>
Levin Zimmermann <levin.zimmermann@nexedi.com>
Logan Hanks <logan@bitcasa.com>
Lucas Manning <lucas.manning21@gmail.com>
M. J. Fromberger <michael.j.fromberger@gmail.com>
Manuel Klimek <klimek@google.com>
Maria Shaldibina <mshaldibina@pivotal.io>
Mark Karpeles <magicaltux@gmail.com>
Mike Gray <mike@mikegray.org>
Natalie Fioretti <naadl.93+github@gmail.com>
Nick Cooper <gh@smoogle.org>
Nick Craig-Wood <nick@craig-wood.com>
OneOfOne <oneofone@gmail.com>
Orivej Desh <orivej@gmx.fr>
Patrick Crosby <pcrosby@gmail.com>
Paul Jolly <paul@myitcv.org.uk>
Paul Warren <paul.warren@emc.com>
Rueian <rueiancsie@gmail.com>
Ryan Guest <ryanguest@gmail.com>
Ryan Lamore <rlamore@salesforce.com>
Sebastien Binet <binet@cern.ch>
Shayan Pooya <shayan@arista.com>
Stavros Panakakis <stavrospanakakis@gmail.com>
Tamas Kerecsen <kerecsen@gmail.com>
Tiziano Santoro <tzn@google.com>
Tommy Lindgren <tommy.lindgren@gmail.com>
Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
Valient Gough <vgough@pobox.com>
WeidiDeng <weidi_deng@icloud.com>
Xiaoyi <ashi009@users.noreply.github.com>
Yasin Turan <turyasin@amazon.com>
Yongwoo Park <nnnlife@gmail.com>
Yufeng Cheng <chengyufeng@megvii.com>
ZheNing Hu <adlternative@gmail.com>
Zoey Greer <zoey@buildbuddy.io>
abitduck <abitduck@hotmail.com>
companycy <companycy@gmail.com>
hotaery <626910647@qq.com>
lch <lchopn@gmail.com>
midchildan <git@midchildan.org>
sunjiapeng <782615313@qq.com>
30
vendor/github.com/hanwen/go-fuse/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,30 @@
New BSD License

Copyright (c) 2010 the Go-FUSE Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Ivan Krasin nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

50
vendor/github.com/hanwen/go-fuse/v2/fs/README.md
generated
vendored
Normal file
@@ -0,0 +1,50 @@

Objective
=========

A high-performance FUSE API that minimizes pitfalls with writing
correct filesystems.

Decisions
=========

* Nodes contain references to their children. This is useful
  because most filesystems will need to construct tree-like
  structures.

* Nodes contain references to their parents. As a result, we can
  derive the path for each Inode, and there is no need for a
  separate PathFS.

* Nodes can be "persistent", meaning their lifetime is not under
  control of the kernel. This is useful for constructing FS trees
  in advance, rather than driven by LOOKUP (see the sketch after
  this README).

* The NodeID (used for communicating with the kernel, not to be
  confused with the inode number reported by `ls -i`) is generated
  internally and immutable for an Inode. This avoids any races
  between LOOKUP, NOTIFY and FORGET.

* The mode of an Inode is defined on creation. Files cannot change
  type during their lifetime. This also prevents the common error
  of forgetting to return the filetype in Lookup/GetAttr.

* No global treelock, to ensure scalability.

* Support for hard links. libfuse doesn't support this in the
  high-level API. Extra care for race conditions is needed when
  looking up the same file through different paths.

* Do not issue Notify{Entry,Delete} as part of
  AddChild/RmChild/MvChild: because NodeIDs are unique and
  immutable, there is no confusion about which nodes are
  invalidated, and the notification doesn't have to happen under
  lock.

* Directory reading uses FileHandles as well; the API for read
  is one DirEntry at a time. FileHandles may implement seeking, and we
  call Seek if we see Offsets change in the incoming request.

* Method names are based on syscall names. Where there is no
  syscall (e.g. "open directory"), we bias towards writing
  everything together (Opendir).
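The "persistent nodes" decision above, as a minimal sketch: a root that builds a fixed Artist/Album subtree from OnAdd. The albumRoot type and the names are hypothetical, and the sketch assumes the fs package's NewPersistentInode and AddChild helpers, which are not shown in this hunk:

```go
package fakefs // hypothetical package for illustration

import (
    "context"
    "syscall"

    "github.com/hanwen/go-fuse/v2/fs"
)

// albumRoot is a hypothetical root node that pre-builds its tree, so the
// inodes outlive kernel FORGETs ("persistent" nodes as described above).
type albumRoot struct {
    fs.Inode
}

var _ = (fs.NodeOnAdder)((*albumRoot)(nil))

// OnAdd constructs a fixed Artist/Album directory chain when the root is
// attached to the tree.
func (r *albumRoot) OnAdd(ctx context.Context) {
    artist := r.NewPersistentInode(ctx, &fs.Inode{},
        fs.StableAttr{Mode: syscall.S_IFDIR})
    r.AddChild("Artist", artist, true)

    album := artist.NewPersistentInode(ctx, &fs.Inode{},
        fs.StableAttr{Mode: syscall.S_IFDIR})
    artist.AddChild("Album", album, true)
}
```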
822
vendor/github.com/hanwen/go-fuse/v2/fs/api.go
generated
vendored
Normal file
@@ -0,0 +1,822 @@
|
|||||||
|
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package fs provides infrastructure to build tree-organized filesystems.
|
||||||
|
//
|
||||||
|
// # Structure of a file system implementation
|
||||||
|
//
|
||||||
|
// To create a file system, you should first define types for the
|
||||||
|
// nodes of the file system tree.
|
||||||
|
//
|
||||||
|
// type myNode struct {
|
||||||
|
// fs.Inode
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// // Node types must be InodeEmbedders
|
||||||
|
// var _ = (fs.InodeEmbedder)((*myNode)(nil))
|
||||||
|
//
|
||||||
|
// // Node types should implement some file system operations, eg. Lookup
|
||||||
|
// var _ = (fs.NodeLookuper)((*myNode)(nil))
|
||||||
|
//
|
||||||
|
// func (n *myNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
|
||||||
|
// ops := myNode{}
|
||||||
|
// out.Mode = 0755
|
||||||
|
// out.Size = 42
|
||||||
|
// return n.NewInode(ctx, &ops, fs.StableAttr{Mode: syscall.S_IFREG}), 0
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The method names are inspired by the system call names, so we have
|
||||||
|
// Listxattr rather than ListXAttr.
|
||||||
|
//
|
||||||
|
// The file system is mounted by calling Mount on the root of the tree,
|
||||||
|
//
|
||||||
|
// server, err := fs.Mount("/tmp/mnt", &myNode{}, &fs.Options{})
|
||||||
|
// ..
|
||||||
|
// // start serving the file system
|
||||||
|
// server.Wait()
|
||||||
|
//
|
||||||
|
// # Error handling
|
||||||
|
//
|
||||||
|
// All error reporting must use the syscall.Errno type. This is an
|
||||||
|
// integer with predefined error codes, where the value 0 (`OK`)
|
||||||
|
// should be used to indicate success.
|
||||||
|
//
|
||||||
|
// # File system concepts
|
||||||
|
//
|
||||||
|
// The FUSE API is very similar to Linux' internal VFS API for
|
||||||
|
// defining file systems in the kernel. It is therefore useful to
|
||||||
|
// understand some terminology.
|
||||||
|
//
|
||||||
|
// File content: the raw bytes that we store inside regular files.
|
||||||
|
//
|
||||||
|
// Path: a /-separated string path that describes location of a node
|
||||||
|
// in the file system tree. For example
|
||||||
|
//
|
||||||
|
// dir1/file
|
||||||
|
//
|
||||||
|
// describes path root → dir1 → file.
|
||||||
|
//
|
||||||
|
// There can be several paths leading from tree root to a particular node,
|
||||||
|
// known as hard-linking, for example
|
||||||
|
//
|
||||||
|
// root
|
||||||
|
// / \
|
||||||
|
// dir1 dir2
|
||||||
|
// \ /
|
||||||
|
// file
|
||||||
|
//
|
||||||
|
// Inode: ("index node") points to the file content, and stores
|
||||||
|
// metadata (size, timestamps) about a file or directory. Each
|
||||||
|
// inode has a type (directory, symlink, regular file, etc.) and
|
||||||
|
// an identity (a 64-bit number, unique to the file
|
||||||
|
// system). Directories can have children.
|
||||||
|
//
|
||||||
|
// The inode in the kernel is represented in Go-FUSE as the Inode
|
||||||
|
// type.
|
||||||
|
//
|
||||||
|
// While common OS APIs are phrased in terms of paths (strings), the
|
||||||
|
// precise semantics of a file system are better described in terms of
|
||||||
|
// Inodes. This allows us to specify what happens in corner cases,
|
||||||
|
// such as writing data to deleted files.
|
||||||
|
//
|
||||||
|
// File descriptor: a handle returned when opening a file. File
|
||||||
|
// descriptors always refer to a single inode.
|
||||||
|
//
|
||||||
|
// Dentry: a dirent maps (parent inode number, name string) tuple to
|
||||||
|
// child inode, thus representing a parent/child relation (or the
|
||||||
|
// absence thereof). Dentries do not have an equivalent type inside
|
||||||
|
// Go-FUSE, but the result of Lookup operation essentially is a
|
||||||
|
// dentry, which the kernel puts in a cache.
|
||||||
|
//
|
||||||
|
// # Kernel caching
|
||||||
|
//
|
||||||
|
// The kernel caches several pieces of information from the FUSE process:
|
||||||
|
//
|
||||||
|
// 1. File contents: enabled with the fuse.FOPEN_KEEP_CACHE return flag
|
||||||
|
// in Open, manipulated with ReadCache and WriteCache, and invalidated
|
||||||
|
// with Inode.NotifyContent
|
||||||
|
//
|
||||||
|
// 2. File Attributes (size, mtime, etc.): controlled with the
|
||||||
|
// attribute timeout fields in fuse.AttrOut and fuse.EntryOut, which
|
||||||
|
// get populated from Getattr and Lookup
|
||||||
|
//
|
||||||
|
// 3. Dentries (parent/child relations in the FS tree):
|
||||||
|
// controlled with the timeout fields in fuse.EntryOut, and
|
||||||
|
// invalidated with Inode.NotifyEntry and Inode.NotifyDelete.
|
||||||
|
//
|
||||||
|
// Without entry timeouts, every operation on file "a/b/c"
|
||||||
|
// must first do lookups for "a", "a/b" and "a/b/c", which is
|
||||||
|
// expensive because of context switches between the kernel and the
|
||||||
|
// FUSE process.
|
||||||
|
//
|
||||||
|
// Unsuccessful entry lookups can also be cached by setting an entry
|
||||||
|
// timeout when Lookup returns ENOENT.
|
||||||
|
//
|
||||||
|
// The libfuse C library specifies 1 second timeouts for both
|
||||||
|
// attribute and directory entries, but no timeout for negative
|
||||||
|
// entries by default. This can be achieved in go-fuse by setting
|
||||||
|
// options on mount, eg.
|
||||||
|
//
|
||||||
|
// sec := time.Second
|
||||||
|
// opts := fs.Options{
|
||||||
|
// EntryTimeout: &sec,
|
||||||
|
// AttrTimeout: &sec,
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// # Interrupts
|
||||||
|
//
|
||||||
|
// If the process accessing a FUSE file system is interrupted, the
|
||||||
|
// kernel sends an interrupt message, which cancels the context passed
|
||||||
|
// to the NodeXxxxx methods. If the file system chooses to honor this
|
||||||
|
// cancellation, the method must return [syscall.EINTR]. All unmasked
|
||||||
|
// signals generate an interrupt. In particular, the SIGURG signal
|
||||||
|
// (which the Go runtime uses for managing goroutine preemption) also
|
||||||
|
// generates an interrupt.
|
||||||
|
//
|
||||||
|
// # Locking
|
||||||
|
//
|
||||||
|
// Locks for networked filesystems are supported through the suite of
|
||||||
|
// Getlk, Setlk and Setlkw methods. They allow locks on regions of
|
||||||
|
// regular files.
|
||||||
|
//
|
||||||
|
// # Parallelism
|
||||||
|
//
|
||||||
|
// The VFS layer in the kernel is optimized to be highly parallel, and
|
||||||
|
// this parallelism also affects FUSE file systems: many FUSE
|
||||||
|
// operations can run in parallel, and this invites race
|
||||||
|
// conditions. It is strongly recommended to test your FUSE file
|
||||||
|
// system issuing file operations in parallel, and using the race
|
||||||
|
// detector to weed out data races.
|
||||||
|
//
|
||||||
|
// # Deadlocks
|
||||||
|
//
|
||||||
|
// The Go runtime multiplexes Goroutines onto operating system
|
||||||
|
// threads, and makes assumptions that some system calls do not
|
||||||
|
// block. When accessing a file system from the same process that
|
||||||
|
// serves the file system (e.g. in unittests), this can lead to
|
||||||
|
// deadlocks, especially when GOMAXPROCS=1, when the Go runtime
|
||||||
|
// assumes a system call does not block, but actually is served by the
|
||||||
|
// Go-FUSE process.
|
||||||
|
//
|
||||||
|
// The following deadlocks are known:
|
||||||
|
//
|
||||||
|
// 1. Spawning a subprocess uses a fork/exec sequence: the process
|
||||||
|
// forks itself into a parent and child. The parent waits for the
|
||||||
|
// child to signal that the exec failed or succeeded, while the child
|
||||||
|
// prepares for calling exec(). Any setup step in the child that
|
||||||
|
// triggers a FUSE request can cause a deadlock.
|
||||||
|
//
|
||||||
|
// 1a. If the subprocess has a directory specified, the child will
|
||||||
|
// chdir into that directory. This generates an ACCESS operation on
|
||||||
|
// the directory.
|
||||||
|
//
|
||||||
|
// This deadlock can be avoided by disabling the ACCESS
|
||||||
|
// operation: return syscall.ENOSYS in the Access implementation, and
|
||||||
|
// ensure it is triggered before initiating the subprocess.
|
||||||
|
//
|
||||||
|
// 1b. If the subprocess inherits files, the child process uses dup3()
|
||||||
|
// to remap file descriptors. If the destination fd happens to be
|
||||||
|
// backed by Go-FUSE, the dup3() call will implicitly close the fd,
|
||||||
|
// generating a FLUSH operation, eg.
|
||||||
|
//
|
||||||
|
// f1, err := os.Open("/fusemnt/file1")
|
||||||
|
// // f1.Fd() == 3
|
||||||
|
// f2, err := os.Open("/fusemnt/file1")
|
||||||
|
// // f2.Fd() == 4
|
||||||
|
//
|
||||||
|
// cmd := exec.Command("/bin/true")
|
||||||
|
// cmd.ExtraFiles = []*os.File{f2}
|
||||||
|
// // f2 (fd 4) is moved to fd 3. Deadlocks with GOMAXPROCS=1.
|
||||||
|
// cmd.Start()
|
||||||
|
//
|
||||||
|
// This deadlock can be avoided by ensuring that file descriptors
|
||||||
|
// pointing into FUSE mounts and file descriptors passed into
|
||||||
|
// subprocesses do not overlap, e.g. inserting the following before
|
||||||
|
// the above example:
|
||||||
|
//
|
||||||
|
// for {
|
||||||
|
// f, _ := os.Open("/dev/null")
|
||||||
|
// defer f.Close()
|
||||||
|
// if f.Fd() > 3 {
|
||||||
|
// break
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The library tries to reserve fd 3, because FUSE mounts are created
|
||||||
|
// by calling "fusermount" with an inherited file descriptor, but the
|
||||||
|
// same problem may occur for other file descriptors.
|
||||||
|
//
|
||||||
|
// 1c. If the executable is on the FUSE mount. In this case, the child
|
||||||
|
// calls exec, which reads the file to execute, which triggers an OPEN
|
||||||
|
// opcode. This can be worked around by invoking the subprocess
|
||||||
|
// through a wrapper, eg `bash -c file/on/fuse-mount`.
|
||||||
|
//
|
||||||
|
// 2. The Go runtime uses the epoll system call to understand which
|
||||||
|
// goroutines can respond to I/O. The runtime assumes that epoll does
|
||||||
|
// not block, but if files are on a FUSE filesystem, the kernel will
|
||||||
|
// generate a POLL operation. To prevent this from happening, Go-FUSE
|
||||||
|
// disables the POLL opcode on mount. To ensure this has happened, call
|
||||||
|
// WaitMount.
|
||||||
|
//
|
||||||
|
// 3. Memory mapping a file served by FUSE. Accessing the mapped
|
||||||
|
// memory generates a page fault, which blocks the OS thread running
|
||||||
|
// the goroutine.
|
||||||
|
//
|
||||||
|
// # Dynamically discovered file systems
|
||||||
|
//
|
||||||
|
// File system data usually cannot fit all in RAM, so the kernel must
|
||||||
|
// discover the file system dynamically: as you enter and list
|
||||||
|
// directory contents, the kernel asks the FUSE server about the files
|
||||||
|
// and directories you are busy reading/writing, and forgets parts of
|
||||||
|
// your file system when it is low on memory.
|
||||||
|
//
|
||||||
|
// The two important operations for dynamic file systems are:
|
||||||
|
// 1. Lookup, part of the NodeLookuper interface for discovering
|
||||||
|
// individual children of directories, and 2. Readdir, part of the
|
||||||
|
// NodeReaddirer interface for listing the contents of a directory.
|
||||||
|
//
|
||||||
|
// # Static in-memory file systems
|
||||||
|
//
|
||||||
|
// For small, read-only file systems, getting the locking mechanics of
|
||||||
|
// Lookup correct is tedious, so Go-FUSE provides a feature to
|
||||||
|
// simplify building such file systems.
|
||||||
|
//
|
||||||
|
// Instead of discovering the FS tree on the fly, you can construct
|
||||||
|
// the entire tree from an OnAdd method. Then, that in-memory tree
|
||||||
|
// structure becomes the source of truth. This means that Go-FUSE must
|
||||||
|
// remember Inodes even if the kernel is no longer interested in
|
||||||
|
// them. This is done by instantiating "persistent" inodes from the
|
||||||
|
// OnAdd method of the root node. See the ZipFS example for a
|
||||||
|
// runnable example of how to do this.
|
||||||
|
package fs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hanwen/go-fuse/v2/fuse"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InodeEmbedder is an interface for structs that embed Inode.
|
||||||
|
//
|
||||||
|
// InodeEmbedder objects usually should implement some of the NodeXxxx
|
||||||
|
// interfaces, to provide user-defined file system behaviors.
|
||||||
|
//
|
||||||
|
// In general, if an InodeEmbedder does not implement specific
|
||||||
|
// filesystem methods, the filesystem will react as if it is a
|
||||||
|
// read-only filesystem with a predefined tree structure.
|
||||||
|
type InodeEmbedder interface {
|
||||||
|
// inode is used internally to link Inode to a Node.
|
||||||
|
//
|
||||||
|
// See Inode() for the public API to retrieve an inode from Node.
|
||||||
|
embed() *Inode
|
||||||
|
|
||||||
|
// EmbeddedInode returns a pointer to the embedded inode.
|
||||||
|
EmbeddedInode() *Inode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Statfs implements statistics for the filesystem that holds this
|
||||||
|
// Inode. If not defined, the `out` argument will zeroed with an OK
|
||||||
|
// result. This is because OSX filesystems must Statfs, or the mount
|
||||||
|
// will not work.
|
||||||
|
type NodeStatfser interface {
|
||||||
|
Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Access should return if the caller can access the file with the
|
||||||
|
// given mode. This is used for two purposes: to determine if a user
|
||||||
|
// may enter a directory, and to implement the access system
|
||||||
|
// call. In the latter case, the context has data about the real
|
||||||
|
// UID. For example, a root-SUID binary called by user susan gets the
|
||||||
|
// UID and GID for susan here.
|
||||||
|
//
|
||||||
|
// If not defined, a default implementation will check traditional
|
||||||
|
// unix permissions of the Getattr result against the caller. If access
|
||||||
|
// permissions must be obeyed precisely, the filesystem should return
|
||||||
|
// permissions from GetAttr/Lookup, and set [Options.NullPermissions].
|
||||||
|
// Without [Options.NullPermissions], a missing permission (mode =
|
||||||
|
// 0000) is interpreted as 0755 for directories, and chdir is always
|
||||||
|
// allowed.
|
||||||
|
type NodeAccesser interface {
|
||||||
|
Access(ctx context.Context, mask uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAttr reads attributes for an Inode. The library will ensure that
|
||||||
|
// Mode and Ino are set correctly. For files that are not opened with
|
||||||
|
// FOPEN_DIRECTIO, Size should be set so it can be read correctly. If
|
||||||
|
// returning zeroed permissions, the default behavior is to change the
|
||||||
|
// mode to 0755 (directory) or 0644 (files). This can be switched off
|
||||||
|
// with the Options.NullPermissions setting. If blksize is unset, 4096
|
||||||
|
// is assumed, and the 'blocks' field is set accordingly. The 'f'
|
||||||
|
// argument is provided for consistency, however, in practice the
|
||||||
|
// kernel never sends a file handle, even if the Getattr call
|
||||||
|
// originated from an fstat system call.
|
||||||
|
type NodeGetattrer interface {
|
||||||
|
Getattr(ctx context.Context, f FileHandle, out *fuse.AttrOut) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAttr sets attributes for an Inode. Default is to return ENOTSUP.
|
||||||
|
type NodeSetattrer interface {
|
||||||
|
Setattr(ctx context.Context, f FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnAdd is called when this InodeEmbedder is initialized.
|
||||||
|
type NodeOnAdder interface {
|
||||||
|
OnAdd(ctx context.Context)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Getxattr should read data for the given attribute into
|
||||||
|
// `dest` and return the number of bytes. If `dest` is too
|
||||||
|
// small, it should return ERANGE and the size of the attribute.
|
||||||
|
// If not defined, Getxattr will return ENOATTR.
|
||||||
|
type NodeGetxattrer interface {
|
||||||
|
Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setxattr should store data for the given attribute. See
|
||||||
|
// setxattr(2) for information about flags.
|
||||||
|
// If not defined, Setxattr will return ENOATTR.
|
||||||
|
type NodeSetxattrer interface {
|
||||||
|
Setxattr(ctx context.Context, attr string, data []byte, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Removexattr should delete the given attribute.
|
||||||
|
// If not defined, Removexattr will return ENOATTR.
|
||||||
|
type NodeRemovexattrer interface {
|
||||||
|
Removexattr(ctx context.Context, attr string) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Listxattr should read all attributes (null terminated) into
|
||||||
|
// `dest`. If the `dest` buffer is too small, it should return ERANGE
|
||||||
|
// and the correct size. If not defined, return an empty list and
|
||||||
|
// success.
|
||||||
|
type NodeListxattrer interface {
|
||||||
|
Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Readlink reads the content of a symlink.
|
||||||
|
type NodeReadlinker interface {
|
||||||
|
Readlink(ctx context.Context) ([]byte, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens an Inode (of regular file type) for reading. It
|
||||||
|
// is optional but recommended to return a FileHandle.
|
||||||
|
type NodeOpener interface {
|
||||||
|
Open(ctx context.Context, flags uint32) (fh FileHandle, fuseFlags uint32, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reads data from a file. The data should be returned as
|
||||||
|
// ReadResult, which may be constructed from the incoming
|
||||||
|
// `dest` buffer. If the file was opened without FileHandle,
|
||||||
|
// the FileHandle argument here is nil. The default
|
||||||
|
// implementation forwards to the FileHandle.
|
||||||
|
type NodeReader interface {
|
||||||
|
Read(ctx context.Context, f FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writes the data into the file handle at given offset. After
|
||||||
|
// returning, the data will be reused and may not be referenced.
|
||||||
|
// The default implementation forwards to the FileHandle.
|
||||||
|
type NodeWriter interface {
|
||||||
|
Write(ctx context.Context, f FileHandle, data []byte, off int64) (written uint32, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fsync is a signal to ensure writes to the Inode are flushed
|
||||||
|
// to stable storage.
|
||||||
|
type NodeFsyncer interface {
|
||||||
|
Fsync(ctx context.Context, f FileHandle, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Flush is called for the close(2) call on a file descriptor. In case
|
||||||
|
// of a descriptor that was duplicated using dup(2), it may be called
|
||||||
|
// more than once for the same FileHandle. The default implementation
|
||||||
|
// forwards to the FileHandle, or if the handle does not support
|
||||||
|
// FileFlusher, returns OK.
|
||||||
|
type NodeFlusher interface {
|
||||||
|
Flush(ctx context.Context, f FileHandle) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is called before a FileHandle is forgotten. The
|
||||||
|
// kernel ignores the return value of this method,
|
||||||
|
// so any cleanup that requires specific synchronization or
|
||||||
|
// could fail with I/O errors should happen in Flush instead.
|
||||||
|
// The default implementation forwards to the FileHandle.
|
||||||
|
type NodeReleaser interface {
|
||||||
|
Release(ctx context.Context, f FileHandle) syscall.Errno
|
||||||
|
|
||||||
|
// TODO - what about ReleaseIn?
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate preallocates space for future writes, so they will
|
||||||
|
// never encounter ENOSPC.
|
||||||
|
type NodeAllocater interface {
|
||||||
|
Allocate(ctx context.Context, f FileHandle, off uint64, size uint64, mode uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFileRange copies data between sections of two files,
|
||||||
|
// without the data having to pass through the calling process.
|
||||||
|
type NodeCopyFileRanger interface {
|
||||||
|
CopyFileRange(ctx context.Context, fhIn FileHandle,
|
||||||
|
offIn uint64, out *Inode, fhOut FileHandle, offOut uint64,
|
||||||
|
len uint64, flags uint64) (uint32, syscall.Errno)
|
||||||
|
|
||||||
|
// Ugh. should have been called Copyfilerange
|
||||||
|
}
|
||||||
|
|
||||||
|
type NodeStatxer interface {
|
||||||
|
Statx(ctx context.Context, f FileHandle, flags uint32, mask uint32, out *fuse.StatxOut) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lseek is used to implement holes: it should return the
|
||||||
|
// first offset beyond `off` where there is data (SEEK_DATA)
|
||||||
|
// or where there is a hole (SEEK_HOLE).
|
||||||
|
type NodeLseeker interface {
|
||||||
|
Lseek(ctx context.Context, f FileHandle, Off uint64, whence uint32) (uint64, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Getlk returns locks that would conflict with the given input
|
||||||
|
// lock. If no locks conflict, the output has type L_UNLCK. See
|
||||||
|
// fcntl(2) for more information.
|
||||||
|
// If not defined, returns ENOTSUP
|
||||||
|
type NodeGetlker interface {
|
||||||
|
Getlk(ctx context.Context, f FileHandle, owner uint64, lk *fuse.FileLock, flags uint32, out *fuse.FileLock) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setlk obtains a lock on a file, or fails if the lock could not be
// obtained. See fcntl(2) for more information. If not defined,
|
||||||
|
// returns ENOTSUP
|
||||||
|
type NodeSetlker interface {
|
||||||
|
Setlk(ctx context.Context, f FileHandle, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setlkw obtains a lock on a file, waiting if necessary. See fcntl(2)
|
||||||
|
// for more information. If not defined, returns ENOTSUP
|
||||||
|
type NodeSetlkwer interface {
|
||||||
|
Setlkw(ctx context.Context, f FileHandle, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ioctl implements an ioctl on an open file.
|
||||||
|
type NodeIoctler interface {
|
||||||
|
Ioctl(ctx context.Context, f FileHandle, cmd uint32, arg uint64, input []byte, output []byte) (result int32, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnForget is called when the node becomes unreachable. This can
|
||||||
|
// happen because the kernel issues a FORGET request,
|
||||||
|
// ForgetPersistent() is called on the inode, the last child of the
|
||||||
|
// directory disappears, or (for the root node) unmounting the file
|
||||||
|
// system. Implementers must make sure that the inode cannot be
|
||||||
|
// revived concurrently by a LOOKUP call. Modifying the tree using
|
||||||
|
// RmChild and AddChild can also trigger a spurious OnForget; use
|
||||||
|
// MvChild instead.
|
||||||
|
type NodeOnForgetter interface {
|
||||||
|
OnForget()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DirStream lists directory entries.
|
||||||
|
type DirStream interface {
|
||||||
|
// HasNext indicates if there are further entries. HasNext
|
||||||
|
// might be called on already closed streams.
|
||||||
|
HasNext() bool
|
||||||
|
|
||||||
|
// Next retrieves the next entry. It is only called if HasNext
|
||||||
|
// has previously returned true. The Errno return may be used to
|
||||||
|
// indicate I/O errors
|
||||||
|
Next() (fuse.DirEntry, syscall.Errno)
|
||||||
|
|
||||||
|
// Close releases resources related to this directory
|
||||||
|
// stream.
|
||||||
|
Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup should find a direct child of a directory by the child's name. If
|
||||||
|
// the entry does not exist, it should return ENOENT and optionally
|
||||||
|
// set a NegativeTimeout in `out`. If it does exist, it should return
|
||||||
|
// attribute data in `out` and return the Inode for the child. A new
|
||||||
|
// inode can be created using `Inode.NewInode`. The new Inode will be
|
||||||
|
// added to the FS tree automatically if the return status is OK.
|
||||||
|
//
|
||||||
|
// If a directory does not implement NodeLookuper, the library looks
|
||||||
|
// for an existing child with the given name.
|
||||||
|
//
|
||||||
|
// The input to a Lookup is {parent directory, name string}.
|
||||||
|
//
|
||||||
|
// Lookup, if successful, must return an *Inode. Once the Inode is
|
||||||
|
// returned to the kernel, the kernel can issue further operations,
|
||||||
|
// such as Open or Getxattr on that node.
|
||||||
|
//
|
||||||
|
// A successful Lookup also returns an EntryOut. Among others, this
|
||||||
|
// contains file attributes (mode, size, mtime, etc.).
|
||||||
|
//
|
||||||
|
// FUSE supports other operations that modify the namespace. For
|
||||||
|
// example, the Symlink, Create, Mknod, Link methods all create new
|
||||||
|
// children in directories. Hence, they also return *Inode and must
|
||||||
|
// populate their fuse.EntryOut arguments.
|
||||||
|
type NodeLookuper interface {
|
||||||
|
Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*Inode, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NodeWrapChilder wraps a FS node implementation in another one. If
|
||||||
|
// defined, it is called automatically from NewInode and
|
||||||
|
// NewPersistentInode. Thus, existing file system implementations,
|
||||||
|
// even from other packages, can be customized by wrapping them. The
|
||||||
|
// following example is a loopback file system that forbids deletions.
|
||||||
|
//
|
||||||
|
// type NoDelete struct {
|
||||||
|
// *fs.LoopbackNode
|
||||||
|
// }
|
||||||
|
// func (w *NoDelete) Unlink(ctx context.Context, name string) syscall.Errno {
|
||||||
|
// return syscall.EPERM
|
||||||
|
// }
|
||||||
|
// func (w *NoDelete) WrapChild(ctx context.Context, ops fs.InodeEmbedder) fs.InodeEmbedder {
|
||||||
|
// return &NoDelete{ops.(*LoopbackNode)}
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// See also the LoopbackReuse example for a more practical
|
||||||
|
// application.
|
||||||
|
type NodeWrapChilder interface {
|
||||||
|
WrapChild(ctx context.Context, ops InodeEmbedder) InodeEmbedder
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenDir opens a directory Inode for reading its
|
||||||
|
// contents. The actual reading is driven from Readdir, so
|
||||||
|
// this method is just for performing sanity/permission
|
||||||
|
// checks. The default is to return success.
|
||||||
|
type NodeOpendirer interface {
|
||||||
|
Opendir(ctx context.Context) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Readdir opens a stream of directory entries.
|
||||||
|
//
|
||||||
|
// Readdir essentially returns a list of strings, and it is allowed
|
||||||
|
// for Readdir to return different results from Lookup. For example,
|
||||||
|
// you can return nothing for Readdir ("ls my-fuse-mount" is empty),
|
||||||
|
// while still implementing Lookup ("ls my-fuse-mount/a-specific-file"
|
||||||
|
// shows a single file). The DirStream returned must be deterministic;
|
||||||
|
// a randomized result (e.g. due to map iteration) can lead to entries
|
||||||
|
// disappearing if multiple processes read the same directory
|
||||||
|
// concurrently.
|
||||||
|
//
|
||||||
|
// If a directory does not implement NodeReaddirer, a list of
|
||||||
|
// currently known children from the tree is returned. This means that
|
||||||
|
// static in-memory file systems need not implement NodeReaddirer.
|
||||||
|
type NodeReaddirer interface {
|
||||||
|
Readdir(ctx context.Context) (DirStream, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mkdir is similar to Lookup, but must create a directory entry and Inode.
|
||||||
|
// Default is to return ENOTSUP.
|
||||||
|
type NodeMkdirer interface {
|
||||||
|
Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*Inode, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mknod is similar to Lookup, but must create a device entry and Inode.
|
||||||
|
// Default is to return ENOTSUP.
|
||||||
|
type NodeMknoder interface {
|
||||||
|
Mknod(ctx context.Context, name string, mode uint32, dev uint32, out *fuse.EntryOut) (*Inode, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Link is similar to Lookup, but must create a new link to an existing Inode.
|
||||||
|
// Default is to return ENOTSUP.
|
||||||
|
type NodeLinker interface {
|
||||||
|
Link(ctx context.Context, target InodeEmbedder, name string, out *fuse.EntryOut) (node *Inode, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Symlink is similar to Lookup, but must create a new symbolic link.
|
||||||
|
// Default is to return ENOTSUP.
|
||||||
|
type NodeSymlinker interface {
|
||||||
|
Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *Inode, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create is similar to Lookup, but should create a new
|
||||||
|
// child. It typically also returns a FileHandle as a
|
||||||
|
// reference for future reads/writes.
|
||||||
|
// Default is to return EROFS.
|
||||||
|
type NodeCreater interface {
|
||||||
|
Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (node *Inode, fh FileHandle, fuseFlags uint32, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlink should remove a child from this directory. If the
|
||||||
|
// return status is OK, the Inode is removed as child in the
|
||||||
|
// FS tree automatically. Default is to return success.
|
||||||
|
type NodeUnlinker interface {
|
||||||
|
Unlink(ctx context.Context, name string) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rmdir is like Unlink but for directories.
|
||||||
|
// Default is to return success.
|
||||||
|
type NodeRmdirer interface {
|
||||||
|
Rmdir(ctx context.Context, name string) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rename should move a child from one directory to a different
|
||||||
|
// one. The change is effected in the FS tree if the return status is
|
||||||
|
// OK. Default is to return ENOTSUP.
|
||||||
|
type NodeRenamer interface {
|
||||||
|
Rename(ctx context.Context, name string, newParent InodeEmbedder, newName string, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileHandle is a resource identifier for opened files. Usually, a
|
||||||
|
// FileHandle should implement some of the FileXxxx interfaces.
|
||||||
|
//
|
||||||
|
// All of the FileXxxx operations can also be implemented at the
|
||||||
|
// InodeEmbedder level, for example, one can implement NodeReader
|
||||||
|
// instead of FileReader.
|
||||||
|
//
|
||||||
|
// FileHandles are useful in two cases: First, if the underlying
|
||||||
|
// storage systems needs a handle for reading/writing. This is the
|
||||||
|
// case with Unix system calls, which need a file descriptor (See also
|
||||||
|
// the function `NewLoopbackFile`). Second, it is useful for
|
||||||
|
// implementing files whose contents are not tied to an inode. For
|
||||||
|
// example, a file like `/proc/interrupts` has no fixed content, but
|
||||||
|
// changes on each open call. This means that each file handle must
|
||||||
|
// have its own view of the content; this view can be tied to a
|
||||||
|
// FileHandle. Files that have such dynamic content should return the
|
||||||
|
// FOPEN_DIRECT_IO flag from their `Open` method. See directio_test.go
|
||||||
|
// for an example.
|
||||||
|
type FileHandle interface {
|
||||||
|
}
|
||||||
|
|
||||||
|
// FilePassthroughFder is a file backed by a physical
|
||||||
|
// file. PassthroughFd should return an open file descriptor (and
|
||||||
|
// true), and the kernel will execute read/write operations directly
|
||||||
|
// on the backing file, bypassing the FUSE process. This function will
|
||||||
|
// be called once when processing the Create or Open operation, so
|
||||||
|
// there is no concern about concurrent access to the Fd. If the
|
||||||
|
// function returns false, passthrough will not be used for this file.
|
||||||
|
type FilePassthroughFder interface {
|
||||||
|
PassthroughFd() (int, bool)
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeReleaser.
|
||||||
|
type FileReleaser interface {
|
||||||
|
Release(ctx context.Context) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeGetattrer.
|
||||||
|
type FileGetattrer interface {
|
||||||
|
Getattr(ctx context.Context, out *fuse.AttrOut) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
type FileStatxer interface {
|
||||||
|
Statx(ctx context.Context, flags uint32, mask uint32, out *fuse.StatxOut) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeReader.
|
||||||
|
type FileReader interface {
|
||||||
|
Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeWriter.
|
||||||
|
type FileWriter interface {
|
||||||
|
Write(ctx context.Context, data []byte, off int64) (written uint32, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeGetlker.
|
||||||
|
type FileGetlker interface {
|
||||||
|
Getlk(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32, out *fuse.FileLock) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeSetlker.
|
||||||
|
type FileSetlker interface {
|
||||||
|
Setlk(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeSetlkwer.
|
||||||
|
type FileSetlkwer interface {
|
||||||
|
Setlkw(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeLseeker.
|
||||||
|
type FileLseeker interface {
|
||||||
|
Lseek(ctx context.Context, off uint64, whence uint32) (uint64, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeFlusher.
|
||||||
|
type FileFlusher interface {
|
||||||
|
Flush(ctx context.Context) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeFsyncer.
|
||||||
|
type FileFsyncer interface {
|
||||||
|
Fsync(ctx context.Context, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeSetattrer.
|
||||||
|
type FileSetattrer interface {
|
||||||
|
Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeAllocater.
|
||||||
|
type FileAllocater interface {
|
||||||
|
Allocate(ctx context.Context, off uint64, size uint64, mode uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// See NodeIoctler.
|
||||||
|
type FileIoctler interface {
|
||||||
|
Ioctl(ctx context.Context, cmd uint32, arg uint64, input []byte, output []byte) (result int32, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Opens a directory. This supersedes NodeOpendirer, allowing to pass
|
||||||
|
// back flags (eg. FOPEN_CACHE_DIR).
|
||||||
|
type NodeOpendirHandler interface {
|
||||||
|
OpendirHandle(ctx context.Context, flags uint32) (fh FileHandle, fuseFlags uint32, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileReaddirenter is a directory that supports reading.
|
||||||
|
type FileReaddirenter interface {
|
||||||
|
// Read a single directory entry.
|
||||||
|
Readdirent(ctx context.Context) (*fuse.DirEntry, syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileLookuper is a directory handle that supports lookup. If this is
|
||||||
|
// defined, FileLookuper.Lookup on the directory is called for
|
||||||
|
// READDIRPLUS calls, rather than NodeLookuper.Lookup. The name passed
|
||||||
|
// in will always be the last name produced by Readdirent. If a child
|
||||||
|
// with the given name already exists, that should be returned. In
|
||||||
|
// case of directory seeks that straddle response boundaries,
|
||||||
|
// Readdirent may be called without a subsequent Lookup call.
|
||||||
|
type FileLookuper interface {
|
||||||
|
Lookup(ctx context.Context, name string, out *fuse.EntryOut) (child *Inode, errno syscall.Errno)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileFsyncdirer is a directory that supports fsyncdir.
|
||||||
|
type FileFsyncdirer interface {
|
||||||
|
Fsyncdir(ctx context.Context, flags uint32) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileSeekdirer is a directory that supports seeking. `off` is an
|
||||||
|
// opaque uint64 value, where only the value 0 is reserved for the
|
||||||
|
// start of the stream. (See https://lwn.net/Articles/544520/ for
|
||||||
|
// background).
|
||||||
|
type FileSeekdirer interface {
|
||||||
|
Seekdir(ctx context.Context, off uint64) syscall.Errno
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileReleasedirer is a directory that supports a cleanup operation.
|
||||||
|
type FileReleasedirer interface {
|
||||||
|
Releasedir(ctx context.Context, releaseFlags uint32)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Options are options for the entire filesystem.
|
||||||
|
type Options struct {
|
||||||
|
// MountOptions contain the options for mounting the fuse server.
|
||||||
|
fuse.MountOptions
|
||||||
|
|
||||||
|
// EntryTimeout, if non-nil, defines the overall entry timeout
|
||||||
|
// for the file system. See [fuse.EntryOut] for more information.
|
||||||
|
EntryTimeout *time.Duration
|
||||||
|
|
||||||
|
// AttrTimeout, if non-nil, defines the overall attribute
|
||||||
|
// timeout for the file system. See [fuse.AttrOut] for more
|
||||||
|
// information.
|
||||||
|
AttrTimeout *time.Duration
|
||||||
|
|
||||||
|
// NegativeTimeout, if non-nil, defines the overall entry timeout
|
||||||
|
// for failed lookups (fuse.ENOENT). See [fuse.EntryOut] for
|
||||||
|
// more information.
|
||||||
|
NegativeTimeout *time.Duration
|
||||||
|
|
||||||
|
// FirstAutomaticIno is start of the automatic inode numbers that are handed
|
||||||
|
// out sequentially.
|
||||||
|
//
|
||||||
|
// If unset, the default is 2^63.
|
||||||
|
FirstAutomaticIno uint64
|
||||||
|
|
||||||
|
// OnAdd, if non-nil, is an alternative way to specify the OnAdd
|
||||||
|
// functionality of the root node.
|
||||||
|
OnAdd func(ctx context.Context)
|
||||||
|
|
||||||
|
// NullPermissions, if set, leaves null file permissions
|
||||||
|
// alone. Otherwise, they are set to 755 (dirs) or 644 (other
|
||||||
|
// files.), which is necessary for doing a chdir into the FUSE
|
||||||
|
// directories.
|
||||||
|
NullPermissions bool
|
||||||
|
|
||||||
|
// UID, if nonzero, is the default UID to use instead of the
|
||||||
|
// zero (zero) UID.
|
||||||
|
UID uint32
|
||||||
|
|
||||||
|
// GID, if nonzero, is the default GID to use instead of the
|
||||||
|
// zero (zero) GID.
|
||||||
|
GID uint32
|
||||||
|
|
||||||
|
// ServerCallbacks are optional callbacks to stub out notification functions
|
||||||
|
// for testing a filesystem without mounting it.
|
||||||
|
ServerCallbacks ServerCallbacks
|
||||||
|
|
||||||
|
// Logger is a sink for diagnostic messages. Diagnostic
|
||||||
|
// messages are printed under conditions where we cannot
|
||||||
|
// return error, but want to signal something seems off
|
||||||
|
// anyway. If unset, no messages are printed.
|
||||||
|
//
|
||||||
|
// This field shadows (and thus, is distinct) from
|
||||||
|
// MountOptions.Logger.
|
||||||
|
Logger *log.Logger
|
||||||
|
|
||||||
|
// RootStableAttr is an optional way to set e.g. Ino and/or Gen for
|
||||||
|
// the root directory when calling fs.Mount(), Mode is ignored.
|
||||||
|
RootStableAttr *StableAttr
|
||||||
|
}
|
||||||
1327
vendor/github.com/hanwen/go-fuse/v2/fs/bridge.go
generated
vendored
Normal file
File diff suppressed because it is too large
60
vendor/github.com/hanwen/go-fuse/v2/fs/bridge_linux.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
package fs

import (
    "syscall"

    "github.com/hanwen/go-fuse/v2/fuse"
)

// see rawBridge.setAttr
func (b *rawBridge) setStatx(out *fuse.Statx) {
    if !b.options.NullPermissions && out.Mode&07777 == 0 {
        out.Mode |= 0644
        if out.Mode&syscall.S_IFDIR != 0 {
            out.Mode |= 0111
        }
    }
    if b.options.UID != 0 && out.Uid == 0 {
        out.Uid = b.options.UID
    }
    if b.options.GID != 0 && out.Gid == 0 {
        out.Gid = b.options.GID
    }
    setStatxBlocks(out)
}

// see rawBridge.setAttrTimeout
func (b *rawBridge) setStatxTimeout(out *fuse.StatxOut) {
    if b.options.AttrTimeout != nil && out.Timeout() == 0 {
        out.SetTimeout(*b.options.AttrTimeout)
    }
}

func (b *rawBridge) Statx(cancel <-chan struct{}, in *fuse.StatxIn, out *fuse.StatxOut) fuse.Status {
    n, fe := b.inode(in.NodeId, in.Fh)
    var fh FileHandle
    if fe != nil {
        fh = fe.file
    }

    ctx := &fuse.Context{Caller: in.Caller, Cancel: cancel}

    errno := syscall.ENOSYS
    if sx, ok := n.ops.(NodeStatxer); ok {
        errno = sx.Statx(ctx, fh, in.SxFlags, in.SxMask, out)
    } else if fsx, ok := n.ops.(FileStatxer); ok {
        errno = fsx.Statx(ctx, in.SxFlags, in.SxMask, out)
    }

    if errno == 0 {
        if out.Ino != 0 && n.stableAttr.Ino > 1 && out.Ino != n.stableAttr.Ino {
            b.logf("warning: rawBridge.getattr: overriding ino %d with %d", out.Ino, n.stableAttr.Ino)
        }
        out.Ino = n.stableAttr.Ino
        out.Mode = (out.Statx.Mode & 07777) | uint16(n.stableAttr.Mode)
        b.setStatx(&out.Statx)
        b.setStatxTimeout(out)
    }

    return errnoToStatus(errno)
}
9
vendor/github.com/hanwen/go-fuse/v2/fs/bridge_nonlinux.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
//go:build !linux

package fs

import "github.com/hanwen/go-fuse/v2/fuse"

func (b *rawBridge) Statx(cancel <-chan struct{}, in *fuse.StatxIn, out *fuse.StatxOut) fuse.Status {
    return fuse.ENOSYS
}
33
vendor/github.com/hanwen/go-fuse/v2/fs/constants.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
    "syscall"

    "github.com/hanwen/go-fuse/v2/fuse"
    "github.com/hanwen/go-fuse/v2/internal/xattr"
)

// OK is the Errno return value to indicate absence of errors.
var OK = syscall.Errno(0)

// ToErrno exhumes the syscall.Errno error from wrapped error values.
func ToErrno(err error) syscall.Errno {
    s := fuse.ToStatus(err)
    return syscall.Errno(s)
}

// RENAME_EXCHANGE is a flag argument for renameat2()
const RENAME_EXCHANGE = 0x2

// seek to the next data
const _SEEK_DATA = 3

// seek to the next hole
const _SEEK_HOLE = 4

// ENOATTR indicates that an extended attribute was not present.
const ENOATTR = xattr.ENOATTR
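A minimal usage sketch of ToErrno and OK from a node method; the loopbackDir type and its path field are hypothetical, not part of this commit:

```go
package fakefs // hypothetical package for illustration

import (
    "context"
    "path/filepath"
    "syscall"

    "github.com/hanwen/go-fuse/v2/fs"
)

// loopbackDir is a hypothetical directory node backed by a real directory.
type loopbackDir struct {
    fs.Inode
    path string // absolute path of the backing directory
}

var _ = (fs.NodeUnlinker)((*loopbackDir)(nil))

// Unlink removes the backing file and maps the OS error to a syscall.Errno.
func (n *loopbackDir) Unlink(ctx context.Context, name string) syscall.Errno {
    if err := syscall.Unlink(filepath.Join(n.path, name)); err != nil {
        return fs.ToErrno(err)
    }
    return fs.OK
}
```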
5
vendor/github.com/hanwen/go-fuse/v2/fs/default.go
generated
vendored
Normal file
@@ -0,0 +1,5 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs
216
vendor/github.com/hanwen/go-fuse/v2/fs/dirstream.go
generated
vendored
Normal file
@@ -0,0 +1,216 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
    "context"
    "sync"
    "syscall"

    "github.com/hanwen/go-fuse/v2/fuse"
    "golang.org/x/sys/unix"
)

type dirArray struct {
    idx     int
    entries []fuse.DirEntry
}

func (a *dirArray) HasNext() bool {
    return a.idx < len(a.entries)
}

func (a *dirArray) Next() (fuse.DirEntry, syscall.Errno) {
    e := a.entries[a.idx]
    a.idx++
    e.Off = uint64(a.idx)
    return e, 0
}

func (a *dirArray) Seekdir(ctx context.Context, off uint64) syscall.Errno {
    idx := int(off)
    if idx < 0 || idx > len(a.entries) {
        return syscall.EINVAL
    }
    a.idx = idx
    return 0
}

func (a *dirArray) Close() {

}

func (a *dirArray) Releasedir(ctx context.Context, releaseFlags uint32) {}

func (a *dirArray) Readdirent(ctx context.Context) (de *fuse.DirEntry, errno syscall.Errno) {
    if !a.HasNext() {
        return nil, 0
    }
    e, errno := a.Next()
    return &e, errno
}

// NewLoopbackDirStream opens a directory for reading as a DirStream
func NewLoopbackDirStream(name string) (DirStream, syscall.Errno) {
    // TODO: should return concrete type.
    fd, err := syscall.Open(name, syscall.O_DIRECTORY|syscall.O_CLOEXEC, 0755)
    if err != nil {
        return nil, ToErrno(err)
    }
    return NewLoopbackDirStreamFd(fd)
}

// NewListDirStream wraps a slice of DirEntry as a DirStream.
func NewListDirStream(list []fuse.DirEntry) DirStream {
    return &dirArray{entries: list}
}

// implement FileReaddirenter/FileReleasedirer
type dirStreamAsFile struct {
    creator func(context.Context) (DirStream, syscall.Errno)
    ds      DirStream
}

func (d *dirStreamAsFile) Releasedir(ctx context.Context, releaseFlags uint32) {
    if d.ds != nil {
        d.ds.Close()
    }
}

func (d *dirStreamAsFile) Readdirent(ctx context.Context) (de *fuse.DirEntry, errno syscall.Errno) {
    if d.ds == nil {
        d.ds, errno = d.creator(ctx)
        if errno != 0 {
            return nil, errno
        }
    }
    if !d.ds.HasNext() {
        return nil, 0
    }

    e, errno := d.ds.Next()
    return &e, errno
}

func (d *dirStreamAsFile) Seekdir(ctx context.Context, off uint64) syscall.Errno {
    if d.ds == nil {
        var errno syscall.Errno
        d.ds, errno = d.creator(ctx)
        if errno != 0 {
            return errno
        }
    }
    if sd, ok := d.ds.(FileSeekdirer); ok {
        return sd.Seekdir(ctx, off)
    }
    return syscall.ENOTSUP
}

type loopbackDirStream struct {
    buf []byte

    // Protects mutable members
    mu sync.Mutex

    // mutable
    todo      []byte
    todoErrno syscall.Errno
    fd        int
}

// NewLoopbackDirStreamFd reads the directory opened at file descriptor fd as
// a DirStream
func NewLoopbackDirStreamFd(fd int) (DirStream, syscall.Errno) {
    ds := &loopbackDirStream{
        buf: make([]byte, 4096),
        fd:  fd,
    }
    ds.load()
    return ds, OK
}

func (ds *loopbackDirStream) Close() {
    ds.mu.Lock()
    defer ds.mu.Unlock()
    if ds.fd != -1 {
        syscall.Close(ds.fd)
        ds.fd = -1
    }
}

var _ = (FileReleasedirer)((*loopbackDirStream)(nil))

func (ds *loopbackDirStream) Releasedir(ctx context.Context, flags uint32) {
    ds.Close()
}

var _ = (FileSeekdirer)((*loopbackDirStream)(nil))

func (ds *loopbackDirStream) Seekdir(ctx context.Context, off uint64) syscall.Errno {
    ds.mu.Lock()
    defer ds.mu.Unlock()
    _, errno := unix.Seek(ds.fd, int64(off), unix.SEEK_SET)
    if errno != nil {
        return ToErrno(errno)
    }

    ds.todo = nil
    ds.todoErrno = 0
    ds.load()
    return 0
}

var _ = (FileFsyncdirer)((*loopbackDirStream)(nil))

func (ds *loopbackDirStream) Fsyncdir(ctx context.Context, flags uint32) syscall.Errno {
    ds.mu.Lock()
    defer ds.mu.Unlock()
    return ToErrno(syscall.Fsync(ds.fd))
}

func (ds *loopbackDirStream) HasNext() bool {
    ds.mu.Lock()
    defer ds.mu.Unlock()
    return len(ds.todo) > 0 || ds.todoErrno != 0
}

var _ = (FileReaddirenter)((*loopbackDirStream)(nil))

func (ds *loopbackDirStream) Readdirent(ctx context.Context) (*fuse.DirEntry, syscall.Errno) {
    if !ds.HasNext() {
        return nil, 0
    }
    de, errno := ds.Next()
    return &de, errno
}

func (ds *loopbackDirStream) Next() (fuse.DirEntry, syscall.Errno) {
    ds.mu.Lock()
    defer ds.mu.Unlock()

    if ds.todoErrno != 0 {
        return fuse.DirEntry{}, ds.todoErrno
    }
    var res fuse.DirEntry
    n := res.Parse(ds.todo)
    ds.todo = ds.todo[n:]
    if len(ds.todo) == 0 {
        ds.load()
    }
    return res, 0
}

func (ds *loopbackDirStream) load() {
    if len(ds.todo) > 0 {
        return
    }

    n, err := getdents(ds.fd, ds.buf)
    if n < 0 {
        n = 0
    }
    ds.todo = ds.buf[:n]
    ds.todoErrno = ToErrno(err)
}
11
vendor/github.com/hanwen/go-fuse/v2/fs/dirstream_darwin.go
generated
vendored
Normal file
11
vendor/github.com/hanwen/go-fuse/v2/fs/dirstream_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import "golang.org/x/sys/unix"

func getdents(fd int, buf []byte) (int, error) {
	return unix.Getdirentries(fd, buf, nil)
}
13
vendor/github.com/hanwen/go-fuse/v2/fs/dirstream_unix.go
generated
vendored
Normal file
13
vendor/github.com/hanwen/go-fuse/v2/fs/dirstream_unix.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
//go:build !darwin

// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import "golang.org/x/sys/unix"

func getdents(fd int, buf []byte) (int, error) {
	return unix.Getdents(fd, buf)
}
277
vendor/github.com/hanwen/go-fuse/v2/fs/files.go
generated
vendored
Normal file
277
vendor/github.com/hanwen/go-fuse/v2/fs/files.go
generated
vendored
Normal file
@@ -0,0 +1,277 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"sync"
	"syscall"
	"unsafe"

	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/hanwen/go-fuse/v2/internal/fallocate"
	"github.com/hanwen/go-fuse/v2/internal/ioctl"
	"golang.org/x/sys/unix"
)

// NewLoopbackFile creates a FileHandle out of a file descriptor. All
// operations are implemented. When using the Fd from a *os.File, call
// syscall.Dup() on the fd, to avoid os.File's finalizer from closing
// the file descriptor.
func NewLoopbackFile(fd int) FileHandle {
	return &loopbackFile{fd: fd}
}

type loopbackFile struct {
	mu sync.Mutex
	fd int
}

var _ = (FileHandle)((*loopbackFile)(nil))
var _ = (FileReleaser)((*loopbackFile)(nil))
var _ = (FileGetattrer)((*loopbackFile)(nil))
var _ = (FileReader)((*loopbackFile)(nil))
var _ = (FileWriter)((*loopbackFile)(nil))
var _ = (FileGetlker)((*loopbackFile)(nil))
var _ = (FileSetlker)((*loopbackFile)(nil))
var _ = (FileSetlkwer)((*loopbackFile)(nil))
var _ = (FileLseeker)((*loopbackFile)(nil))
var _ = (FileFlusher)((*loopbackFile)(nil))
var _ = (FileFsyncer)((*loopbackFile)(nil))
var _ = (FileSetattrer)((*loopbackFile)(nil))
var _ = (FileAllocater)((*loopbackFile)(nil))
var _ = (FilePassthroughFder)((*loopbackFile)(nil))

func (f *loopbackFile) PassthroughFd() (int, bool) {
	// This Fd is not accessed concurrently, but lock anyway for uniformity.
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.fd, true
}

func (f *loopbackFile) Read(ctx context.Context, buf []byte, off int64) (res fuse.ReadResult, errno syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	r := fuse.ReadResultFd(uintptr(f.fd), off, len(buf))
	return r, OK
}

func (f *loopbackFile) Write(ctx context.Context, data []byte, off int64) (uint32, syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	n, err := syscall.Pwrite(f.fd, data, off)
	return uint32(n), ToErrno(err)
}

func (f *loopbackFile) Release(ctx context.Context) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.fd != -1 {
		err := syscall.Close(f.fd)
		f.fd = -1
		return ToErrno(err)
	}
	return syscall.EBADF
}

func (f *loopbackFile) Flush(ctx context.Context) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	// Since Flush() may be called for each dup'd fd, we don't
	// want to really close the file, we just want to flush. This
	// is achieved by closing a dup'd fd.
	newFd, err := syscall.Dup(f.fd)

	if err != nil {
		return ToErrno(err)
	}
	err = syscall.Close(newFd)
	return ToErrno(err)
}

func (f *loopbackFile) Fsync(ctx context.Context, flags uint32) (errno syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	r := ToErrno(syscall.Fsync(f.fd))

	return r
}

const (
	_OFD_GETLK  = 36
	_OFD_SETLK  = 37
	_OFD_SETLKW = 38
)

func (f *loopbackFile) Getlk(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32, out *fuse.FileLock) (errno syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	flk := syscall.Flock_t{}
	lk.ToFlockT(&flk)
	errno = ToErrno(syscall.FcntlFlock(uintptr(f.fd), _OFD_GETLK, &flk))
	out.FromFlockT(&flk)
	return
}

func (f *loopbackFile) Setlk(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32) (errno syscall.Errno) {
	return f.setLock(ctx, owner, lk, flags, false)
}

func (f *loopbackFile) Setlkw(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32) (errno syscall.Errno) {
	return f.setLock(ctx, owner, lk, flags, true)
}

func (f *loopbackFile) setLock(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32, blocking bool) (errno syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if (flags & fuse.FUSE_LK_FLOCK) != 0 {
		var op int
		switch lk.Typ {
		case syscall.F_RDLCK:
			op = syscall.LOCK_SH
		case syscall.F_WRLCK:
			op = syscall.LOCK_EX
		case syscall.F_UNLCK:
			op = syscall.LOCK_UN
		default:
			return syscall.EINVAL
		}
		if !blocking {
			op |= syscall.LOCK_NB
		}
		return ToErrno(syscall.Flock(f.fd, op))
	} else {
		flk := syscall.Flock_t{}
		lk.ToFlockT(&flk)
		var op int
		if blocking {
			op = _OFD_SETLKW
		} else {
			op = _OFD_SETLK
		}
		return ToErrno(syscall.FcntlFlock(uintptr(f.fd), op, &flk))
	}
}

func (f *loopbackFile) Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno {
	if errno := f.setAttr(ctx, in); errno != 0 {
		return errno
	}

	return f.Getattr(ctx, out)
}

func (f *loopbackFile) fchmod(mode uint32) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	return ToErrno(syscall.Fchmod(f.fd, mode))
}

func (f *loopbackFile) fchown(uid, gid int) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	return ToErrno(syscall.Fchown(f.fd, uid, gid))
}

func (f *loopbackFile) ftruncate(sz uint64) syscall.Errno {
	return ToErrno(syscall.Ftruncate(f.fd, int64(sz)))
}

func (f *loopbackFile) setAttr(ctx context.Context, in *fuse.SetAttrIn) syscall.Errno {
	var errno syscall.Errno
	if mode, ok := in.GetMode(); ok {
		if errno := f.fchmod(mode); errno != 0 {
			return errno
		}
	}

	uid32, uOk := in.GetUID()
	gid32, gOk := in.GetGID()
	if uOk || gOk {
		uid := -1
		gid := -1

		if uOk {
			uid = int(uid32)
		}
		if gOk {
			gid = int(gid32)
		}
		if errno := f.fchown(uid, gid); errno != 0 {
			return errno
		}
	}

	mtime, mok := in.GetMTime()
	atime, aok := in.GetATime()

	if mok || aok {
		ap := &atime
		mp := &mtime
		if !aok {
			ap = nil
		}
		if !mok {
			mp = nil
		}
		errno = f.utimens(ap, mp)
		if errno != 0 {
			return errno
		}
	}

	if sz, ok := in.GetSize(); ok {
		if errno := f.ftruncate(sz); errno != 0 {
			return errno
		}
	}
	return OK
}

func (f *loopbackFile) Getattr(ctx context.Context, a *fuse.AttrOut) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	st := syscall.Stat_t{}
	err := syscall.Fstat(f.fd, &st)
	if err != nil {
		return ToErrno(err)
	}
	a.FromStat(&st)

	return OK
}

func (f *loopbackFile) Lseek(ctx context.Context, off uint64, whence uint32) (uint64, syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	n, err := unix.Seek(f.fd, int64(off), int(whence))
	return uint64(n), ToErrno(err)
}

func (f *loopbackFile) Allocate(ctx context.Context, off uint64, sz uint64, mode uint32) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	err := fallocate.Fallocate(f.fd, mode, int64(off), int64(sz))
	if err != nil {
		return ToErrno(err)
	}
	return OK
}

func (f *loopbackFile) Ioctl(ctx context.Context, cmd uint32, arg uint64, input []byte, output []byte) (result int32, errno syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()

	argWord := uintptr(arg)
	ioc := ioctl.Command(cmd)
	if ioc.Read() {
		argWord = uintptr(unsafe.Pointer(&input[0]))
	} else if ioc.Write() {
		argWord = uintptr(unsafe.Pointer(&output[0]))
	}

	res, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.fd), uintptr(cmd), argWord)
	return int32(res), errno
}
32
vendor/github.com/hanwen/go-fuse/v2/fs/files_darwin.go
generated
vendored
Normal file
32
vendor/github.com/hanwen/go-fuse/v2/fs/files_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"syscall"
	"time"

	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/hanwen/go-fuse/v2/internal/utimens"
)

func setBlocks(out *fuse.Attr) {
}

// MacOS before High Sierra lacks utimensat() and UTIME_OMIT.
// We emulate using utimes() and extra Getattr() calls.
func (f *loopbackFile) utimens(a *time.Time, m *time.Time) syscall.Errno {
	var attr fuse.AttrOut
	if a == nil || m == nil {
		errno := f.Getattr(context.Background(), &attr)
		if errno != 0 {
			return errno
		}
	}
	tv := utimens.Fill(a, m, &attr.Attr)
	err := syscall.Futimes(int(f.fd), tv)
	return ToErrno(err)
}
6
vendor/github.com/hanwen/go-fuse/v2/fs/files_freebsd.go
generated
vendored
Normal file
6
vendor/github.com/hanwen/go-fuse/v2/fs/files_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
package fs

import "github.com/hanwen/go-fuse/v2/fuse"

func setBlocks(out *fuse.Attr) {
}
46
vendor/github.com/hanwen/go-fuse/v2/fs/files_linux.go
generated
vendored
Normal file
46
vendor/github.com/hanwen/go-fuse/v2/fs/files_linux.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"syscall"

	"github.com/hanwen/go-fuse/v2/fuse"
	"golang.org/x/sys/unix"
)

func setBlocks(out *fuse.Attr) {
	if out.Blksize > 0 {
		return
	}

	out.Blksize = 4096
	pages := (out.Size + 4095) / 4096
	out.Blocks = pages * 8
}

func setStatxBlocks(out *fuse.Statx) {
	if out.Blksize > 0 {
		return
	}

	out.Blksize = 4096
	pages := (out.Size + 4095) / 4096
	out.Blocks = pages * 8
}

func (f *loopbackFile) Statx(ctx context.Context, flags uint32, mask uint32, out *fuse.StatxOut) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	st := unix.Statx_t{}
	err := unix.Statx(f.fd, "", int(flags), int(mask), &st)
	if err != nil {
		return ToErrno(err)
	}
	out.FromStatx(&st)

	return OK
}
30
vendor/github.com/hanwen/go-fuse/v2/fs/files_unix.go
generated
vendored
Normal file
30
vendor/github.com/hanwen/go-fuse/v2/fs/files_unix.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
//go:build !darwin

package fs

import (
	"syscall"
	"time"
	"unsafe"

	"github.com/hanwen/go-fuse/v2/fuse"
)

// Utimens - file handle based version of loopbackFileSystem.Utimens()
func (f *loopbackFile) utimens(a *time.Time, m *time.Time) syscall.Errno {
	var ts [2]syscall.Timespec
	ts[0] = fuse.UtimeToTimespec(a)
	ts[1] = fuse.UtimeToTimespec(m)
	err := futimens(int(f.fd), &ts)
	return ToErrno(err)
}

// futimens - futimens(3) calls utimensat(2) with "pathname" set to null and
// "flags" set to zero
func futimens(fd int, times *[2]syscall.Timespec) (err error) {
	_, _, e1 := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(fd), 0, uintptr(unsafe.Pointer(times)), uintptr(0), 0, 0)
	if e1 != 0 {
		err = syscall.Errno(e1)
	}
	return
}
762
vendor/github.com/hanwen/go-fuse/v2/fs/inode.go
generated
vendored
Normal file
762
vendor/github.com/hanwen/go-fuse/v2/fs/inode.go
generated
vendored
Normal file
@@ -0,0 +1,762 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"fmt"
	"log"
	"math/rand"
	"sort"
	"strings"
	"sync"
	"syscall"
	"unsafe"

	"github.com/hanwen/go-fuse/v2/fuse"
)

// StableAttr holds immutable attributes of a object in the filesystem.
type StableAttr struct {
	// Each Inode has a type, which does not change over the
	// lifetime of the inode, for example fuse.S_IFDIR. The default (0)
	// is interpreted as S_IFREG (regular file).
	Mode uint32

	// The inode number must be unique among the currently live
	// objects in the file system. It is used to communicate to
	// the kernel about this file object. The value uint64(-1)
	// is reserved. When using Ino==0, a unique, sequential
	// number is assigned (starting at 2^63 by default) on Inode creation.
	Ino uint64

	// When reusing a previously used inode number for a new
	// object, the new object must have a different Gen
	// number. This is irrelevant if the FS is not exported over
	// NFS
	Gen uint64
}

// Reserved returns if the StableAttr is using reserved Inode numbers.
func (i *StableAttr) Reserved() bool {
	return i.Ino == ^uint64(0) // fuse.pollHackInode = ^uint64(0)
}

// Inode is a node in VFS tree. Inodes are one-to-one mapped to
// Operations instances, which is the extension interface for file
// systems. One can create fully-formed trees of Inodes ahead of time
// by creating "persistent" Inodes.
//
// The Inode struct contains a lock, so it should not be
// copied. Inodes should be obtained by calling Inode.NewInode() or
// Inode.NewPersistentInode().
type Inode struct {
	stableAttr StableAttr

	ops    InodeEmbedder
	bridge *rawBridge

	// The *Node ID* is an arbitrary uint64 identifier chosen by the FUSE library.
	// It is used the identify *nodes* (files/directories/symlinks/...) in the
	// communication between the FUSE library and the Linux kernel.
	nodeId uint64

	// Following data is mutable.

	// file handles.
	// protected by bridge.mu
	openFiles []uint32

	// backing files, protected by bridge.mu
	backingIDRefcount int
	backingID         int32
	backingFd         int

	// mu protects the following mutable fields. When locking
	// multiple Inodes, locks must be acquired using
	// lockNodes/unlockNodes
	mu sync.Mutex

	// persistent indicates that this node should not be removed
	// from the tree, even if there are no live references. This
	// must be set on creation, and can only be changed to false
	// by calling removeRef.
	// When you change this, you MUST increment changeCounter.
	persistent bool

	// changeCounter increments every time the mutable state
	// (lookupCount, persistent, children, parents) protected by
	// mu is modified.
	//
	// This is used in places where we have to relock inode into inode
	// group lock, and after locking the group we have to check if inode
	// did not changed, and if it changed - retry the operation.
	changeCounter uint32

	// Number of kernel refs to this node.
	// When you change this, you MUST increment changeCounter.
	lookupCount uint64

	// Children of this Inode.
	// When you change this, you MUST increment changeCounter.
	children inodeChildren

	// Parents of this Inode. Can be more than one due to hard links.
	// When you change this, you MUST increment changeCounter.
	parents inodeParents
}

func (n *Inode) IsDir() bool {
	return n.stableAttr.Mode&syscall.S_IFMT == syscall.S_IFDIR
}

func (n *Inode) embed() *Inode {
	return n
}

func (n *Inode) EmbeddedInode() *Inode {
	return n
}

func initInode(n *Inode, ops InodeEmbedder, attr StableAttr, bridge *rawBridge, persistent bool, nodeId uint64) {
	n.ops = ops
	n.stableAttr = attr
	n.bridge = bridge
	n.persistent = persistent
	n.nodeId = nodeId
	if attr.Mode == fuse.S_IFDIR {
		n.children.init()
	}
}

// Set node ID and mode in EntryOut
func (n *Inode) setEntryOut(out *fuse.EntryOut) {
	out.NodeId = n.nodeId
	out.Ino = n.stableAttr.Ino
	out.Mode = (out.Attr.Mode & 07777) | n.stableAttr.Mode
}

// StableAttr returns the (Ino, Gen) tuple for this node.
func (n *Inode) StableAttr() StableAttr {
	return n.stableAttr
}

// Mode returns the filetype
func (n *Inode) Mode() uint32 {
	return n.stableAttr.Mode
}

// Returns the root of the tree
func (n *Inode) Root() *Inode {
	return n.bridge.root
}

// Returns whether this is the root of the tree
func (n *Inode) IsRoot() bool {
	return n.bridge.root == n
}

func modeStr(m uint32) string {
	return map[uint32]string{
		syscall.S_IFREG:  "reg",
		syscall.S_IFLNK:  "lnk",
		syscall.S_IFDIR:  "dir",
		syscall.S_IFSOCK: "soc",
		syscall.S_IFIFO:  "pip",
		syscall.S_IFCHR:  "chr",
		syscall.S_IFBLK:  "blk",
	}[m]
}

func (a StableAttr) String() string {
	return fmt.Sprintf("i%d g%d (%s)",
		a.Ino, a.Gen, modeStr(a.Mode))
}

// debugString is used for debugging. Racy.
func (n *Inode) String() string {
	n.mu.Lock()
	defer n.mu.Unlock()

	return fmt.Sprintf("%s: %s", n.stableAttr.String(), n.children.String())
}

// sortNodes rearranges inode group in consistent order.
//
// The nodes are ordered by their in-RAM address, which gives consistency
// property: for any A and B inodes, sortNodes will either always order A < B,
// or always order A > B.
//
// See lockNodes where this property is used to avoid deadlock when taking
// locks on inode group.
func sortNodes(ns []*Inode) {
	sort.Slice(ns, func(i, j int) bool {
		return nodeLess(ns[i], ns[j])
	})
}

func nodeLess(a, b *Inode) bool {
	return uintptr(unsafe.Pointer(a)) < uintptr(unsafe.Pointer(b))
}

// lockNodes locks group of inodes.
//
// It always lock the inodes in the same order - to avoid deadlocks.
// It also avoids locking an inode more than once, if it was specified multiple times.
// An example when an inode might be given multiple times is if dir/a and dir/b
// are hardlinked to the same inode and the caller needs to take locks on dir children.
func lockNodes(ns ...*Inode) {
	sortNodes(ns)

	// The default value nil prevents trying to lock nil nodes.
	var nprev *Inode
	for _, n := range ns {
		if n != nprev {
			n.mu.Lock()
			nprev = n
		}
	}
}

// lockNode2 locks a and b in order consistent with lockNodes.
func lockNode2(a, b *Inode) {
	if a == b {
		a.mu.Lock()
	} else if nodeLess(a, b) {
		a.mu.Lock()
		b.mu.Lock()
	} else {
		b.mu.Lock()
		a.mu.Lock()
	}
}

// unlockNode2 unlocks a and b
func unlockNode2(a, b *Inode) {
	if a == b {
		a.mu.Unlock()
	} else {
		a.mu.Unlock()
		b.mu.Unlock()
	}
}

// unlockNodes releases locks taken by lockNodes.
func unlockNodes(ns ...*Inode) {
	// we don't need to unlock in the same order that was used in lockNodes.
	// however it still helps to have nodes sorted to avoid duplicates.
	sortNodes(ns)

	var nprev *Inode
	for _, n := range ns {
		if n != nprev {
			n.mu.Unlock()
			nprev = n
		}
	}
}

// Forgotten returns true if the kernel holds no references to this
// inode. This can be used for background cleanup tasks, since the
// kernel has no way of reviving forgotten nodes by its own
// initiative.
//
// Bugs: Forgotten() may momentarily return true in the window between
// creation (NewInode) and adding the node into the tree, which
// happens after Lookup/Mkdir/etc. return.
//
// Deprecated: use NodeOnForgetter instead.
func (n *Inode) Forgotten() bool {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.lookupCount == 0 && n.parents.count() == 0 && !n.persistent
}

// Operations returns the object implementing the file system
// operations.
func (n *Inode) Operations() InodeEmbedder {
	return n.ops
}

// Path returns a path string to the inode relative to `root`.
// Pass nil to walk the hierarchy as far up as possible.
//
// If you set `root`, Path() warns if it finds an orphaned Inode, i.e.
// if it does not end up at `root` after walking the hierarchy.
func (n *Inode) Path(root *Inode) string {
	var segments []string
	p := n
	for p != nil && p != root {
		// We don't try to take all locks at the same time, because
		// the caller won't use the "path" string under lock anyway.
		p.mu.Lock()
		// Get last known parent
		pd := p.parents.get()
		p.mu.Unlock()
		if pd == nil {
			p = nil
			break
		}
		segments = append(segments, pd.name)
		p = pd.parent
	}

	if root != nil && root != p {
		deletedPlaceholder := fmt.Sprintf(".go-fuse.%d/deleted", rand.Uint64())
		n.bridge.logf("warning: Inode.Path: n%d is orphaned, replacing segment with %q",
			n.nodeId, deletedPlaceholder)
		// NOSUBMIT - should replace rather than append?
		segments = append(segments, deletedPlaceholder)
	}

	i := 0
	j := len(segments) - 1

	for i < j {
		segments[i], segments[j] = segments[j], segments[i]
		i++
		j--
	}

	path := strings.Join(segments, "/")
	return path
}

// setEntry does `iparent[name] = ichild` linking.
//
// setEntry must not be called simultaneously for any of iparent or ichild.
// This, for example could be satisfied if both iparent and ichild are locked,
// but it could be also valid if only iparent is locked and ichild was just
// created and only one goroutine keeps referencing it.
func (iparent *Inode) setEntry(name string, ichild *Inode) {
	if ichild.stableAttr.Mode == syscall.S_IFDIR {
		// Directories cannot have more than one parent. Clear the map.
		// This special-case is neccessary because ichild may still have a
		// parent that was forgotten (i.e. removed from bridge.inoMap).
		ichild.parents.clear()
	}
	iparent.children.set(iparent, name, ichild)
}

// NewPersistentInode returns an Inode whose lifetime is not in
// control of the kernel.
//
// When the kernel is short on memory, it will forget cached file
// system information (directory entries and inode metadata). This is
// announced with FORGET messages. There are no guarantees if or when
// this happens. When it happens, these are handled transparently by
// go-fuse: all Inodes created with NewInode are released
// automatically. NewPersistentInode creates inodes that go-fuse keeps
// in memory, even if the kernel is not interested in them. This is
// convenient for building static trees up-front.
func (n *Inode) NewPersistentInode(ctx context.Context, node InodeEmbedder, id StableAttr) *Inode {
	return n.newInode(ctx, node, id, true)
}

// ForgetPersistent manually marks the node as no longer important. If
// it has no children, and if the kernel as no references, the nodes
// gets removed from the tree.
func (n *Inode) ForgetPersistent() {
	n.removeRef(0, true)
}

// NewInode returns an inode for the given InodeEmbedder. The mode
// should be standard mode argument (eg. S_IFDIR). The inode number in
// id.Ino argument is used to implement hard-links. If it is given,
// and another node with the same ID is known, the new inode may be
// ignored, and the old one used instead. If the parent inode
// implements NodeWrapChilder, the returned Inode will have a
// different InodeEmbedder from the one passed in.
func (n *Inode) NewInode(ctx context.Context, node InodeEmbedder, id StableAttr) *Inode {
	return n.newInode(ctx, node, id, false)
}

func (n *Inode) newInode(ctx context.Context, ops InodeEmbedder, id StableAttr, persistent bool) *Inode {
	if wc, ok := n.ops.(NodeWrapChilder); ok {
		ops = wc.WrapChild(ctx, ops)
	}
	return n.bridge.newInode(ctx, ops, id, persistent)
}

// removeRef decreases references. Returns if this operation caused
// the node to be forgotten (for kernel references), and whether it is
// live (ie. was not dropped from the tree)
func (n *Inode) removeRef(nlookup uint64, dropPersistence bool) (hasLookups, isPersistent, hasChildren bool) {
	var beforeLookups, beforePersistence, beforeChildren bool
	var unusedParents []*Inode
	beforeLookups, hasLookups, beforePersistence, isPersistent, beforeChildren, hasChildren, unusedParents = n.removeRefInner(nlookup, dropPersistence, unusedParents)

	if !hasLookups && !isPersistent && !hasChildren && (beforeChildren || beforeLookups || beforePersistence) {
		if nf, ok := n.ops.(NodeOnForgetter); ok {
			nf.OnForget()
		}
	}

	for len(unusedParents) > 0 {
		l := len(unusedParents)
		p := unusedParents[l-1]
		unusedParents = unusedParents[:l-1]
		_, _, _, _, _, _, unusedParents = p.removeRefInner(0, false, unusedParents)

		if nf, ok := p.ops.(NodeOnForgetter); ok {
			nf.OnForget()
		}
	}

	return
}

func (n *Inode) removeRefInner(nlookup uint64, dropPersistence bool, inputUnusedParents []*Inode) (beforeLookups, hasLookups, beforePersistent, isPersistent, beforeChildren, hasChildren bool, unusedParents []*Inode) {
	var lockme []*Inode
	var parents []parentData

	unusedParents = inputUnusedParents

	n.mu.Lock()
	beforeLookups = n.lookupCount > 0
	beforePersistent = n.persistent
	beforeChildren = n.children.len() > 0
	if nlookup > 0 && dropPersistence {
		log.Panic("only one allowed")
	} else if nlookup > n.lookupCount {
		log.Panicf("n%d lookupCount underflow: lookupCount=%d, decrement=%d", n.nodeId, n.lookupCount, nlookup)
	} else if nlookup > 0 {
		n.lookupCount -= nlookup
		n.changeCounter++
	} else if dropPersistence && n.persistent {
		n.persistent = false
		n.changeCounter++
	}

	n.bridge.mu.Lock()
	if n.lookupCount == 0 {
		// Dropping the node from stableAttrs guarantees that no new references to this node are
		// handed out to the kernel, hence we can also safely delete it from kernelNodeIds.
		delete(n.bridge.stableAttrs, n.stableAttr)
		delete(n.bridge.kernelNodeIds, n.nodeId)
	}
	n.bridge.mu.Unlock()

retry:
	for {
		lockme = append(lockme[:0], n)
		parents = parents[:0]
		nChange := n.changeCounter
		hasLookups = n.lookupCount > 0
		hasChildren = n.children.len() > 0
		isPersistent = n.persistent
		for _, p := range n.parents.all() {
			parents = append(parents, p)
			lockme = append(lockme, p.parent)
		}
		n.mu.Unlock()

		if hasLookups || hasChildren || isPersistent {
			return
		}

		lockNodes(lockme...)
		if n.changeCounter != nChange {
			unlockNodes(lockme...)
			// could avoid unlocking and relocking n here.
			n.mu.Lock()
			continue retry
		}

		for _, p := range parents {
			parentNode := p.parent
			if parentNode.children.get(p.name) != n {
				// another node has replaced us already
				continue
			}
			parentNode.children.del(p.parent, p.name)

			if parentNode.children.len() == 0 && parentNode.lookupCount == 0 && !parentNode.persistent {
				unusedParents = append(unusedParents, parentNode)
			}
		}

		if n.lookupCount != 0 {
			log.Panicf("n%d %p lookupCount changed: %d", n.nodeId, n, n.lookupCount)
		}

		unlockNodes(lockme...)
		break
	}

	return
}

// GetChild returns a child node with the given name, or nil if the
// directory has no child by that name.
func (n *Inode) GetChild(name string) *Inode {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.children.get(name)
}

// AddChild adds a child to this node. If overwrite is false, fail if
// the destination already exists.
func (n *Inode) AddChild(name string, ch *Inode, overwrite bool) (success bool) {
	if len(name) == 0 {
		log.Panic("empty name for inode")
	}

retry:
	for {
		lockNode2(n, ch)
		prev := n.children.get(name)
		parentCounter := n.changeCounter
		if prev == nil {
			n.children.set(n, name, ch)
			unlockNode2(n, ch)
			return true
		}
		unlockNode2(n, ch)
		if !overwrite {
			return false
		}
		lockme := [3]*Inode{n, ch, prev}

		lockNodes(lockme[:]...)
		if parentCounter != n.changeCounter {
			unlockNodes(lockme[:]...)
			continue retry
		}

		prev.parents.delete(parentData{name, n})
		n.children.set(n, name, ch)
		prev.changeCounter++
		unlockNodes(lockme[:]...)

		return true
	}
}

// Children returns the list of children of this directory Inode.
func (n *Inode) Children() map[string]*Inode {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.children.toMap()
}

// childrenList returns the list of children of this directory Inode.
// The result is guaranteed to be stable as long as the directory did
// not change.
func (n *Inode) childrenList() []childEntry {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.children.list()
}

// Parents returns a parent of this Inode, or nil if this Inode is
// deleted or is the root
func (n *Inode) Parent() (string, *Inode) {
	n.mu.Lock()
	defer n.mu.Unlock()
	p := n.parents.get()
	if p == nil {
		return "", nil
	}
	return p.name, p.parent
}

// RmAllChildren recursively drops a tree, forgetting all persistent
// nodes.
func (n *Inode) RmAllChildren() {
	for {
		chs := n.Children()
		if len(chs) == 0 {
			break
		}
		for nm, ch := range chs {
			ch.RmAllChildren()
			n.RmChild(nm)
		}
	}
	n.removeRef(0, true)
}

// RmChild removes multiple children. Returns whether the removal
// succeeded and whether the node is still live afterward. The removal
// is transactional: it only succeeds if all names are children, and
// if they all were removed successfully. If the removal was
// successful, and there are no children left, the node may be removed
// from the FS tree. In that case, RmChild returns live==false.
func (n *Inode) RmChild(names ...string) (success, live bool) {
	var lockme []*Inode

retry:
	for {
		n.mu.Lock()
		lockme = append(lockme[:0], n)
		nChange := n.changeCounter
		for _, nm := range names {
			ch := n.children.get(nm)
			if ch == nil {
				n.mu.Unlock()
				return false, true
			}
			lockme = append(lockme, ch)
		}
		n.mu.Unlock()

		lockNodes(lockme...)

		if n.changeCounter != nChange {
			unlockNodes(lockme...)
			continue retry
		}

		for _, nm := range names {
			n.children.del(n, nm)
		}

		live = n.lookupCount > 0 || n.children.len() > 0 || n.persistent
		unlockNodes(lockme...)

		// removal successful
		break
	}

	if !live {
		hasLookups, isPersistent, hasChildren := n.removeRef(0, false)
		return true, (hasLookups || isPersistent || hasChildren)
	}

	return true, true
}

// MvChild executes a rename. If overwrite is set, a child at the
// destination will be overwritten, should it exist. It returns false
// if 'overwrite' is false, and the destination exists.
func (n *Inode) MvChild(old string, newParent *Inode, newName string, overwrite bool) bool {
	if len(newName) == 0 {
		log.Panicf("empty newName for MvChild")
	}

retry:
	for {
		lockNode2(n, newParent)
		counter1 := n.changeCounter
		counter2 := newParent.changeCounter

		oldChild := n.children.get(old)
		destChild := newParent.children.get(newName)
		unlockNode2(n, newParent)

		if destChild != nil && !overwrite {
			return false
		}

		lockNodes(n, newParent, oldChild, destChild)
		if counter2 != newParent.changeCounter || counter1 != n.changeCounter {
			unlockNodes(n, newParent, oldChild, destChild)
			continue retry
		}

		if oldChild != nil {
			n.children.del(n, old)
		}

		if destChild != nil {
			// This can cause the child to be slated for
			// removal; see below
			newParent.children.del(newParent, newName)
		}

		if oldChild != nil {
			newParent.children.set(newParent, newName, oldChild)
		}

		unlockNodes(n, newParent, oldChild, destChild)

		if destChild != nil {
			destChild.removeRef(0, false)
		}
		return true
	}
}

// ExchangeChild swaps the entries at (n, oldName) and (newParent,
// newName).
func (n *Inode) ExchangeChild(oldName string, newParent *Inode, newName string) {
	oldParent := n
retry:
	for {
		lockNode2(oldParent, newParent)
		counter1 := oldParent.changeCounter
		counter2 := newParent.changeCounter

		oldChild := oldParent.children.get(oldName)
		destChild := newParent.children.get(newName)
		unlockNode2(oldParent, newParent)

		if destChild == oldChild {
			return
		}

		lockNodes(oldParent, newParent, oldChild, destChild)
		if counter2 != newParent.changeCounter || counter1 != oldParent.changeCounter {
			unlockNodes(oldParent, newParent, oldChild, destChild)
			continue retry
		}

		// Detach
		if oldChild != nil {
			oldParent.children.del(oldParent, oldName)
		}

		if destChild != nil {
			newParent.children.del(newParent, newName)
		}

		// Attach
		if oldChild != nil {
			newParent.children.set(newParent, newName, oldChild)
		}

		if destChild != nil {
			oldParent.children.set(oldParent, oldName, destChild)
		}
		unlockNodes(oldParent, newParent, oldChild, destChild)
		return
	}
}

// NotifyEntry notifies the kernel that data for a (directory, name)
// tuple should be invalidated. On next access, a LOOKUP operation
// will be started.
func (n *Inode) NotifyEntry(name string) syscall.Errno {
	status := n.bridge.server.EntryNotify(n.nodeId, name)
	return syscall.Errno(status)
}

// NotifyDelete notifies the kernel that the given inode was removed
// from this directory as entry under the given name. It is equivalent
// to NotifyEntry, but also sends an event to inotify watchers.
func (n *Inode) NotifyDelete(name string, child *Inode) syscall.Errno {
	// XXX arg ordering?
	return syscall.Errno(n.bridge.server.DeleteNotify(n.nodeId, child.nodeId, name))

}

// NotifyContent notifies the kernel that content under the given
// inode should be flushed from buffers.
func (n *Inode) NotifyContent(off, sz int64) syscall.Errno {
	// XXX how does this work for directories?
	return syscall.Errno(n.bridge.server.InodeNotify(n.nodeId, off, sz))
}

// WriteCache stores data in the kernel cache.
func (n *Inode) WriteCache(offset int64, data []byte) syscall.Errno {
	return syscall.Errno(n.bridge.server.InodeNotifyStoreCache(n.nodeId, offset, data))
}

// ReadCache reads data from the kernel cache.
func (n *Inode) ReadCache(offset int64, dest []byte) (count int, errno syscall.Errno) {
	c, s := n.bridge.server.InodeRetrieveCache(n.nodeId, offset, dest)
	return c, syscall.Errno(s)
}
133
vendor/github.com/hanwen/go-fuse/v2/fs/inode_children.go
generated
vendored
Normal file
133
vendor/github.com/hanwen/go-fuse/v2/fs/inode_children.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
// Copyright 2023 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"fmt"
	"strings"
)

type childEntry struct {
	Name  string
	Inode *Inode

	// TODO: store int64 changeCounter of the parent, so we can
	// use the changeCounter as a directory offset.
}

// inodeChildren is a hashmap with deterministic ordering. It is
// important to return the children in a deterministic order for 2
// reasons:
//
// 1. if the ordering is non-deterministic, multiple concurrent
// readdirs can lead to cache corruption (see issue #391)
//
// 2. it simplifies the implementation of directory seeking: the NFS
// protocol doesn't open and close directories. Instead, a directory
// read must always be continued from a previously handed out offset.
//
// By storing the entries in insertion order, and marking them with a
// int64 logical timestamp, the logical timestamp can serve as readdir
// cookie.
type inodeChildren struct {
	// index into children slice.
	childrenMap map[string]int
	children    []childEntry
}

func (c *inodeChildren) init() {
	c.childrenMap = make(map[string]int)
}

func (c *inodeChildren) String() string {
	var ss []string
	for _, e := range c.children {
		ch := e.Inode
		ss = append(ss, fmt.Sprintf("%q=i%d[%s]", e.Name, ch.stableAttr.Ino, modeStr(ch.stableAttr.Mode)))
	}
	return strings.Join(ss, ",")
}

func (c *inodeChildren) get(name string) *Inode {
	idx, ok := c.childrenMap[name]
	if !ok {
		return nil
	}

	return c.children[idx].Inode
}

func (c *inodeChildren) compact() {
	nc := make([]childEntry, 0, 2*len(c.childrenMap)+1)
	nm := make(map[string]int, len(c.childrenMap))
	for _, e := range c.children {
		if e.Inode == nil {
			continue
		}
		nm[e.Name] = len(nc)
		nc = append(nc, e)
	}

	c.childrenMap = nm
	c.children = nc
}

func (c *inodeChildren) set(parent *Inode, name string, ch *Inode) {
	idx, ok := c.childrenMap[name]
	if !ok {
		if cap(c.children) == len(c.children) {
			c.compact()
		}

		idx = len(c.children)
		c.children = append(c.children, childEntry{})
	}

	c.childrenMap[name] = idx
	c.children[idx] = childEntry{Name: name, Inode: ch}
	parent.changeCounter++

	ch.parents.add(parentData{name, parent})
	ch.changeCounter++
}

func (c *inodeChildren) len() int {
	return len(c.childrenMap)
}

func (c *inodeChildren) toMap() map[string]*Inode {
	r := make(map[string]*Inode, len(c.childrenMap))
	for _, e := range c.children {
		if e.Inode != nil {
			r[e.Name] = e.Inode
		}
	}
	return r
}

func (c *inodeChildren) del(parent *Inode, name string) {
	idx, ok := c.childrenMap[name]
	if !ok {
		return
	}

	ch := c.children[idx].Inode

	delete(c.childrenMap, name)
	c.children[idx] = childEntry{}
	ch.parents.delete(parentData{name, parent})
	ch.changeCounter++
	parent.changeCounter++
}

func (c *inodeChildren) list() []childEntry {
	r := make([]childEntry, 0, len(c.childrenMap))
	for _, e := range c.children {
		if e.Inode != nil {
			r = append(r, e)
		}
	}
	return r
}
101
vendor/github.com/hanwen/go-fuse/v2/fs/inode_parents.go
generated
vendored
Normal file
101
vendor/github.com/hanwen/go-fuse/v2/fs/inode_parents.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
// Copyright 2021 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

// inodeParents stores zero or more parents of an Inode,
// remembering which one is the most recent.
//
// No internal locking: the caller is responsible for preventing
// concurrent access.
type inodeParents struct {
	// newest is the most-recently add()'ed parent.
	// nil when we don't have any parents.
	newest *parentData
	// other are parents in addition to the newest.
	// nil or empty when we have <= 1 parents.
	other map[parentData]struct{}
}

// add adds a parent to the store.
func (p *inodeParents) add(n parentData) {
	// one and only parent
	if p.newest == nil {
		p.newest = &n
	}
	// already known as `newest`
	if *p.newest == n {
		return
	}
	// old `newest` gets displaced into `other`
	if p.other == nil {
		p.other = make(map[parentData]struct{})
	}
	p.other[*p.newest] = struct{}{}
	// new parent becomes `newest` (possibly moving up from `other`)
	delete(p.other, n)
	p.newest = &n
}

// get returns the most recent parent
// or nil if there is no parent at all.
func (p *inodeParents) get() *parentData {
	return p.newest
}

// all returns all known parents
// or nil if there is no parent at all.
func (p *inodeParents) all() []parentData {
	count := p.count()
	if count == 0 {
		return nil
	}
	out := make([]parentData, 0, count)
	out = append(out, *p.newest)
	for i := range p.other {
		out = append(out, i)
	}
	return out
}

func (p *inodeParents) delete(n parentData) {
	// We have zero parents, so we can't delete any.
	if p.newest == nil {
		return
	}
	// If it's not the `newest` it must be in `other` (or nowhere).
	if *p.newest != n {
		delete(p.other, n)
		return
	}
	// We want to delete `newest`, but there is no other to replace it.
	if len(p.other) == 0 {
		p.newest = nil
		return
	}
	// Move random entry from `other` over `newest`.
	var i parentData
	for i = range p.other {
		p.newest = &i
		break
	}
	delete(p.other, i)
}

func (p *inodeParents) clear() {
	p.newest = nil
	p.other = nil
}

func (p *inodeParents) count() int {
	if p.newest == nil {
		return 0
	}
	return 1 + len(p.other)
}

type parentData struct {
	name   string
	parent *Inode
}
549
vendor/github.com/hanwen/go-fuse/v2/fs/loopback.go
generated
vendored
Normal file
549
vendor/github.com/hanwen/go-fuse/v2/fs/loopback.go
generated
vendored
Normal file
@@ -0,0 +1,549 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"os"
	"path/filepath"
	"syscall"

	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/hanwen/go-fuse/v2/internal/openat"
	"github.com/hanwen/go-fuse/v2/internal/renameat"
	"golang.org/x/sys/unix"
)

// LoopbackRoot holds the parameters for creating a new loopback
// filesystem. Loopback filesystem delegate their operations to an
// underlying POSIX file system.
type LoopbackRoot struct {
	// The path to the root of the underlying file system.
	Path string

	// The device on which the Path resides. This must be set if
	// the underlying filesystem crosses file systems.
	Dev uint64

	// NewNode returns a new InodeEmbedder to be used to respond
	// to a LOOKUP/CREATE/MKDIR/MKNOD opcode. If not set, use a
	// LoopbackNode.
	//
	// Deprecated: use NodeWrapChilder instead.
	NewNode func(rootData *LoopbackRoot, parent *Inode, name string, st *syscall.Stat_t) InodeEmbedder

	// RootNode is the root of the Loopback. This must be set if
	// the Loopback file system is not the root of the FUSE
	// mount. It is set automatically by NewLoopbackRoot.
	RootNode InodeEmbedder
}

func (r *LoopbackRoot) newNode(parent *Inode, name string, st *syscall.Stat_t) InodeEmbedder {
	if r.NewNode != nil {
		return r.NewNode(r, parent, name, st)
	}
	return &LoopbackNode{
		RootData: r,
	}
}

func (r *LoopbackRoot) idFromStat(st *syscall.Stat_t) StableAttr {
	// We compose an inode number by the underlying inode, and
	// mixing in the device number. In traditional filesystems,
	// the inode numbers are small. The device numbers are also
	// small (typically 16 bit). Finally, we mask out the root
	// device number of the root, so a loopback FS that does not
	// encompass multiple mounts will reflect the inode numbers of
	// the underlying filesystem
	swapped := (uint64(st.Dev) << 32) | (uint64(st.Dev) >> 32)
	swappedRootDev := (r.Dev << 32) | (r.Dev >> 32)
	return StableAttr{
		Mode: uint32(st.Mode),
		Gen:  1,
		// This should work well for traditional backing FSes,
		// not so much for other go-fuse FS-es
		Ino: (swapped ^ swappedRootDev) ^ st.Ino,
	}
}

// LoopbackNode is a filesystem node in a loopback file system. It is
// public so it can be used as a basis for other loopback based
// filesystems. See NewLoopbackFile or LoopbackRoot for more
// information.
type LoopbackNode struct {
	Inode

	// RootData points back to the root of the loopback filesystem.
	RootData *LoopbackRoot
}

// loopbackNodeEmbedder can only be implemented by the LoopbackNode
// concrete type.
type loopbackNodeEmbedder interface {
	loopbackNode() *LoopbackNode
}

func (n *LoopbackNode) loopbackNode() *LoopbackNode {
	return n
}

var _ = (NodeStatfser)((*LoopbackNode)(nil))

func (n *LoopbackNode) Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno {
	s := syscall.Statfs_t{}
	err := syscall.Statfs(n.path(), &s)
	if err != nil {
		return ToErrno(err)
	}
	out.FromStatfsT(&s)
	return OK
}

// path returns the full path to the file in the underlying file
// system.
func (n *LoopbackNode) root() *Inode {
	var rootNode *Inode
	if n.RootData.RootNode != nil {
		rootNode = n.RootData.RootNode.EmbeddedInode()
	} else {
		rootNode = n.Root()
	}

	return rootNode
}

// relativePath returns the path the node, relative to to the root directory
func (n *LoopbackNode) relativePath() string {
	return n.Path(n.root())
}

// path returns the absolute path to the node
func (n *LoopbackNode) path() string {
	return filepath.Join(n.RootData.Path, n.relativePath())
}

var _ = (NodeLookuper)((*LoopbackNode)(nil))

func (n *LoopbackNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*Inode, syscall.Errno) {
	p := filepath.Join(n.path(), name)

	st := syscall.Stat_t{}
	err := syscall.Lstat(p, &st)
	if err != nil {
		return nil, ToErrno(err)
	}

	out.Attr.FromStat(&st)
	node := n.RootData.newNode(n.EmbeddedInode(), name, &st)
	ch := n.NewInode(ctx, node, n.RootData.idFromStat(&st))
	return ch, 0
}

// preserveOwner sets uid and gid of `path` according to the caller information
// in `ctx`.
func (n *LoopbackNode) preserveOwner(ctx context.Context, path string) error {
	if os.Getuid() != 0 {
		return nil
	}
	caller, ok := fuse.FromContext(ctx)
	if !ok {
		return nil
	}
	return syscall.Lchown(path, int(caller.Uid), int(caller.Gid))
}

var _ = (NodeMknoder)((*LoopbackNode)(nil))

func (n *LoopbackNode) Mknod(ctx context.Context, name string, mode, rdev uint32, out *fuse.EntryOut) (*Inode, syscall.Errno) {
	p := filepath.Join(n.path(), name)
	err := syscall.Mknod(p, mode, intDev(rdev))
	if err != nil {
		return nil, ToErrno(err)
	}
	n.preserveOwner(ctx, p)
	st := syscall.Stat_t{}
	if err := syscall.Lstat(p, &st); err != nil {
		syscall.Rmdir(p)
		return nil, ToErrno(err)
	}

	out.Attr.FromStat(&st)

	node := n.RootData.newNode(n.EmbeddedInode(), name, &st)
	ch := n.NewInode(ctx, node, n.RootData.idFromStat(&st))

	return ch, 0
}

var _ = (NodeMkdirer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*Inode, syscall.Errno) {
	p := filepath.Join(n.path(), name)
	err := os.Mkdir(p, os.FileMode(mode))
	if err != nil {
		return nil, ToErrno(err)
	}
	n.preserveOwner(ctx, p)
	st := syscall.Stat_t{}
	if err := syscall.Lstat(p, &st); err != nil {
		syscall.Rmdir(p)
		return nil, ToErrno(err)
	}

	out.Attr.FromStat(&st)

	node := n.RootData.newNode(n.EmbeddedInode(), name, &st)
	ch := n.NewInode(ctx, node, n.RootData.idFromStat(&st))

	return ch, 0
}

var _ = (NodeRmdirer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Rmdir(ctx context.Context, name string) syscall.Errno {
	p := filepath.Join(n.path(), name)
	err := syscall.Rmdir(p)
	return ToErrno(err)
}

var _ = (NodeUnlinker)((*LoopbackNode)(nil))

func (n *LoopbackNode) Unlink(ctx context.Context, name string) syscall.Errno {
	p := filepath.Join(n.path(), name)
	err := syscall.Unlink(p)
	return ToErrno(err)
}

var _ = (NodeRenamer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Rename(ctx context.Context, name string, newParent InodeEmbedder, newName string, flags uint32) syscall.Errno {
	e2, ok := newParent.(loopbackNodeEmbedder)
	if !ok {
		return syscall.EXDEV
	}

	if e2.loopbackNode().RootData != n.RootData {
		return syscall.EXDEV
	}

	if flags != 0 {
		return n.rename2(name, e2.loopbackNode(), newName, flags)
	}

	p1 := filepath.Join(n.path(), name)
	p2 := filepath.Join(e2.loopbackNode().path(), newName)

	err := syscall.Rename(p1, p2)
	return ToErrno(err)
}

var _ = (NodeCreater)((*LoopbackNode)(nil))

func (n *LoopbackNode) Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (inode *Inode, fh FileHandle, fuseFlags uint32, errno syscall.Errno) {
	p := filepath.Join(n.path(), name)
	flags = flags &^ syscall.O_APPEND
	fd, err := syscall.Open(p, int(flags)|os.O_CREATE, mode)
	if err != nil {
		return nil, nil, 0, ToErrno(err)
	}
	n.preserveOwner(ctx, p)
	st := syscall.Stat_t{}
	if err := syscall.Fstat(fd, &st); err != nil {
		syscall.Close(fd)
		return nil, nil, 0, ToErrno(err)
	}

	node := n.RootData.newNode(n.EmbeddedInode(), name, &st)
	ch := n.NewInode(ctx, node, n.RootData.idFromStat(&st))
	lf := NewLoopbackFile(fd)

	out.FromStat(&st)
	return ch, lf, 0, 0
}

func (n *LoopbackNode) rename2(name string, newParent *LoopbackNode, newName string, flags uint32) syscall.Errno {
	fd1, err := syscall.Open(n.path(), syscall.O_DIRECTORY, 0)
	if err != nil {
		return ToErrno(err)
	}
	defer syscall.Close(fd1)
	p2 := newParent.path()
	fd2, err := syscall.Open(p2, syscall.O_DIRECTORY, 0)
	if err != nil {
		return ToErrno(err)
	}
	defer syscall.Close(fd2)

	var st syscall.Stat_t
	if err := syscall.Fstat(fd1, &st); err != nil {
		return ToErrno(err)
	}

	// Double check that nodes didn't change from under us.
	if n.root() != n.EmbeddedInode() && n.Inode.StableAttr().Ino != n.RootData.idFromStat(&st).Ino {
		return syscall.EBUSY
	}
	if err := syscall.Fstat(fd2, &st); err != nil {
		return ToErrno(err)
	}

	if (newParent.root() != newParent.EmbeddedInode()) && newParent.Inode.StableAttr().Ino != n.RootData.idFromStat(&st).Ino {
		return syscall.EBUSY
	}

	return ToErrno(renameat.Renameat(fd1, name, fd2, newName, uint(flags)))
}

var _ = (NodeSymlinker)((*LoopbackNode)(nil))

func (n *LoopbackNode) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (*Inode, syscall.Errno) {
	p := filepath.Join(n.path(), name)
	err := syscall.Symlink(target, p)
	if err != nil {
		return nil, ToErrno(err)
	}
	n.preserveOwner(ctx, p)
	st := syscall.Stat_t{}
	if err := syscall.Lstat(p, &st); err != nil {
		syscall.Unlink(p)
		return nil, ToErrno(err)
	}
	node := n.RootData.newNode(n.EmbeddedInode(), name, &st)
	ch := n.NewInode(ctx, node, n.RootData.idFromStat(&st))

	out.Attr.FromStat(&st)
	return ch, 0
}

var _ = (NodeLinker)((*LoopbackNode)(nil))

func (n *LoopbackNode) Link(ctx context.Context, target InodeEmbedder, name string, out *fuse.EntryOut) (*Inode, syscall.Errno) {

	p := filepath.Join(n.path(), name)
	err := syscall.Link(filepath.Join(n.RootData.Path, target.EmbeddedInode().Path(nil)), p)
	if err != nil {
		return nil, ToErrno(err)
	}
	st := syscall.Stat_t{}
	if err := syscall.Lstat(p, &st); err != nil {
		syscall.Unlink(p)
		return nil, ToErrno(err)
	}
	node := n.RootData.newNode(n.EmbeddedInode(), name, &st)
	ch := n.NewInode(ctx, node, n.RootData.idFromStat(&st))

	out.Attr.FromStat(&st)
	return ch, 0
}

var _ = (NodeReadlinker)((*LoopbackNode)(nil))

func (n *LoopbackNode) Readlink(ctx context.Context) ([]byte, syscall.Errno) {
	p := n.path()

	for l := 256; ; l *= 2 {
		buf := make([]byte, l)
		sz, err := syscall.Readlink(p, buf)
		if err != nil {
			return nil, ToErrno(err)
		}

		if sz < len(buf) {
			return buf[:sz], 0
		}
	}
}

var _ = (NodeOpener)((*LoopbackNode)(nil))

// Symlink-safe through use of OpenSymlinkAware.
func (n *LoopbackNode) Open(ctx context.Context, flags uint32) (fh FileHandle, fuseFlags uint32, errno syscall.Errno) {
	flags = flags &^ (syscall.O_APPEND | fuse.FMODE_EXEC)

	f, err := openat.OpenSymlinkAware(n.RootData.Path, n.relativePath(), int(flags), 0)
	if err != nil {
		return nil, 0, ToErrno(err)
	}
	lf := NewLoopbackFile(f)
	return lf, 0, 0
}

var _ = (NodeOpendirHandler)((*LoopbackNode)(nil))

func (n *LoopbackNode) OpendirHandle(ctx context.Context, flags uint32) (FileHandle, uint32, syscall.Errno) {
	ds, errno := NewLoopbackDirStream(n.path())
	if errno != 0 {
		return nil, 0, errno
	}
	return ds, 0, errno
}

var _ = (NodeReaddirer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Readdir(ctx context.Context) (DirStream, syscall.Errno) {
	return NewLoopbackDirStream(n.path())
}

var _ = (NodeGetattrer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Getattr(ctx context.Context, f FileHandle, out *fuse.AttrOut) syscall.Errno {
	if f != nil {
		if fga, ok := f.(FileGetattrer); ok {
			return fga.Getattr(ctx, out)
		}
	}

	p := n.path()

	var err error
	st := syscall.Stat_t{}
	if &n.Inode == n.Root() {
		err = syscall.Stat(p, &st)
	} else {
		err = syscall.Lstat(p, &st)
	}

	if err != nil {
		return ToErrno(err)
	}
	out.FromStat(&st)
	return OK
}

var _ = (NodeSetattrer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Setattr(ctx context.Context, f FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno {
	p := n.path()
	fsa, ok := f.(FileSetattrer)
	if ok && fsa != nil {
		fsa.Setattr(ctx, in, out)
	} else {
		if m, ok := in.GetMode(); ok {
			if err := syscall.Chmod(p, m); err != nil {
				return ToErrno(err)
			}
		}

		uid, uok := in.GetUID()
		gid, gok := in.GetGID()
		if uok || gok {
			suid := -1
			sgid := -1
			if uok {
				suid = int(uid)
			}
			if gok {
				sgid = int(gid)
			}
			if err := syscall.Chown(p, suid, sgid); err != nil {
				return ToErrno(err)
			}
		}

		mtime, mok := in.GetMTime()
		atime, aok := in.GetATime()

		if mok || aok {
			ta := unix.Timespec{Nsec: unix_UTIME_OMIT}
			tm := unix.Timespec{Nsec: unix_UTIME_OMIT}
			var err error
			if aok {
				ta, err = unix.TimeToTimespec(atime)
				if err != nil {
					return ToErrno(err)
				}
			}
			if mok {
				tm, err = unix.TimeToTimespec(mtime)
				if err != nil {
					return ToErrno(err)
				}
			}
			ts := []unix.Timespec{ta, tm}
			if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, ts, unix.AT_SYMLINK_NOFOLLOW); err != nil {
				return ToErrno(err)
			}
		}

		if sz, ok := in.GetSize(); ok {
			if err := syscall.Truncate(p, int64(sz)); err != nil {
				return ToErrno(err)
			}
		}
	}

	fga, ok := f.(FileGetattrer)
	if ok && fga != nil {
		fga.Getattr(ctx, out)
	} else {
		st := syscall.Stat_t{}
		err := syscall.Lstat(p, &st)
		if err != nil {
			return ToErrno(err)
		}
		out.FromStat(&st)
	}
	return OK
}

var _ = (NodeGetxattrer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno) {
	sz, err := unix.Lgetxattr(n.path(), attr, dest)
	return uint32(sz), ToErrno(err)
}

var _ = (NodeSetxattrer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Setxattr(ctx context.Context, attr string, data []byte, flags uint32) syscall.Errno {
	err := unix.Lsetxattr(n.path(), attr, data, int(flags))
	return ToErrno(err)
}

var _ = (NodeRemovexattrer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Removexattr(ctx context.Context, attr string) syscall.Errno {
	err := unix.Lremovexattr(n.path(), attr)
	return ToErrno(err)
}

var _ = (NodeCopyFileRanger)((*LoopbackNode)(nil))

func (n *LoopbackNode) CopyFileRange(ctx context.Context, fhIn FileHandle,
	offIn uint64, out *Inode, fhOut FileHandle, offOut uint64,
	len uint64, flags uint64) (uint32, syscall.Errno) {
	lfIn, ok := fhIn.(*loopbackFile)
	if !ok {
		return 0, unix.ENOTSUP
	}
	lfOut, ok := fhOut.(*loopbackFile)
	if !ok {
		return 0, unix.ENOTSUP
	}
	signedOffIn := int64(offIn)
	signedOffOut := int64(offOut)
	doCopyFileRange(lfIn.fd, signedOffIn, lfOut.fd, signedOffOut, int(len), int(flags))
	return 0, syscall.ENOSYS
}

// NewLoopbackRoot returns a root node for a loopback file system whose
// root is at the given root. This node implements all NodeXxxxer
// operations available.
func NewLoopbackRoot(rootPath string) (InodeEmbedder, error) {
	var st syscall.Stat_t
	err := syscall.Stat(rootPath, &st)
	if err != nil {
		return nil, err
	}

	root := &LoopbackRoot{
		Path: rootPath,
		Dev:  uint64(st.Dev),
	}

	rootNode := root.newNode(nil, "", &st)
	root.RootNode = rootNode
	return rootNode, nil
}
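The `idFromStat` helper above builds a stable FUSE inode number by swapping the 32-bit halves of the device number, cancelling out the (equally swapped) root device, and XOR-ing the result into the backing file's inode number; for files on the root device the two device terms cancel and the FUSE inode equals the underlying inode. A minimal standalone sketch of that mixing (the `mixIno` name is made up for illustration and is not part of the vendored file):

```go
package main

import "fmt"

// mixIno mirrors the inode-number composition used by idFromStat above:
// swap the halves of the device numbers, cancel out the root device,
// and fold the result into the underlying inode number.
func mixIno(dev, rootDev, ino uint64) uint64 {
	swapped := (dev << 32) | (dev >> 32)
	swappedRoot := (rootDev << 32) | (rootDev >> 32)
	return (swapped ^ swappedRoot) ^ ino
}

func main() {
	// Same device as the root: the swapped terms cancel, so the
	// FUSE inode number equals the backing inode number.
	fmt.Println(mixIno(0x2b, 0x2b, 12345)) // prints 12345
}
```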
36
vendor/github.com/hanwen/go-fuse/v2/fs/loopback_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
//go:build darwin
// +build darwin

// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"syscall"
	"time"
)

const unix_UTIME_OMIT = 0x0

// timeToTimeval - Convert time.Time to syscall.Timeval
//
// Note: This does not use syscall.NsecToTimespec because
// that does not work properly for times before 1970,
// see https://github.com/golang/go/issues/12777
func timeToTimeval(t *time.Time) syscall.Timeval {
	var tv syscall.Timeval
	tv.Usec = int32(t.Nanosecond() / 1000)
	tv.Sec = t.Unix()
	return tv
}

func doCopyFileRange(fdIn int, offIn int64, fdOut int, offOut int64,
	len int, flags int) (uint32, syscall.Errno) {
	return 0, syscall.ENOSYS
}

func intDev(dev uint32) int {
	return int(dev)
}
80
vendor/github.com/hanwen/go-fuse/v2/fs/loopback_freebsd.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
// Copyright 2024 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"syscall"

	"github.com/hanwen/go-fuse/v2/internal/xattr"
	"golang.org/x/sys/unix"
)

const unix_UTIME_OMIT = unix.UTIME_OMIT

// FreeBSD has added copy_file_range(2) since FreeBSD 12. However,
// golang.org/x/sys/unix hasn't add corresponding syscall constant or
// wrap function. Here we define the syscall constant until sys/unix
// provides.
const sys_COPY_FILE_RANGE = 569

// TODO: replace the manual syscall when sys/unix provides CopyFileRange
// for FreeBSD
func doCopyFileRange(fdIn int, offIn int64, fdOut int, offOut int64,
	len int, flags int) (uint32, syscall.Errno) {
	count, _, errno := unix.Syscall6(sys_COPY_FILE_RANGE,
		uintptr(fdIn), uintptr(offIn), uintptr(fdOut), uintptr(offOut),
		uintptr(len), uintptr(flags),
	)
	return uint32(count), errno
}

func intDev(dev uint32) uint64 {
	return uint64(dev)
}

// Since FUSE on FreeBSD expect Linux flavor data format of
// listxattr, we should reconstruct it with data returned by
// FreeBSD's syscall. And here we have added a "user." prefix
// to put them under "user" namespace, which is readable and
// writable for normal user, for a userspace implemented FS.
func rebuildAttrBuf(attrList [][]byte) []byte {
	ret := make([]byte, 0)
	for _, attrName := range attrList {
		nsAttrName := append([]byte("user."), attrName...)
		ret = append(ret, nsAttrName...)
		ret = append(ret, 0x0)
	}
	return ret
}

var _ = (NodeListxattrer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) {
	// In order to simulate same data format as Linux does,
	// and the size of returned buf is required to match, we must
	// call unix.Llistxattr twice.
	sz, err := unix.Llistxattr(n.path(), nil)
	if err != nil {
		return uint32(sz), ToErrno(err)
	}
	rawBuf := make([]byte, sz)
	sz, err = unix.Llistxattr(n.path(), rawBuf)
	if err != nil {
		return uint32(sz), ToErrno(err)
	}
	attrList := xattr.ParseAttrNames(rawBuf)
	rebuiltBuf := rebuildAttrBuf(attrList)
	sz = len(rebuiltBuf)
	if len(dest) != 0 {
		// When len(dest) is 0, which means that caller wants to get
		// the size. If len(dest) is less than len(rebuiltBuf), but greater
		// than 0 dest will be also filled with data from rebuiltBuf,
		// but truncated to len(dest). copy() function will do the same.
		// And this behaviour is same as FreeBSD's syscall extattr_list_file(2).
		sz = copy(dest, rebuiltBuf)
	}
	return uint32(sz), ToErrno(err)
}
50
vendor/github.com/hanwen/go-fuse/v2/fs/loopback_linux.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
//go:build linux
// +build linux

// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"syscall"

	"github.com/hanwen/go-fuse/v2/fuse"
	"golang.org/x/sys/unix"
)

const unix_UTIME_OMIT = unix.UTIME_OMIT

func doCopyFileRange(fdIn int, offIn int64, fdOut int, offOut int64,
	len int, flags int) (uint32, syscall.Errno) {
	count, err := unix.CopyFileRange(fdIn, &offIn, fdOut, &offOut, len, flags)
	return uint32(count), ToErrno(err)
}

func intDev(dev uint32) int {
	return int(dev)
}

var _ = (NodeStatxer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Statx(ctx context.Context, f FileHandle,
	flags uint32, mask uint32,
	out *fuse.StatxOut) syscall.Errno {
	if f != nil {
		if fga, ok := f.(FileStatxer); ok {
			return fga.Statx(ctx, flags, mask, out)
		}
	}

	p := n.path()

	st := unix.Statx_t{}
	err := unix.Statx(unix.AT_FDCWD, p, int(flags), int(mask), &st)
	if err != nil {
		return ToErrno(err)
	}
	out.FromStatx(&st)
	return OK
}
20
vendor/github.com/hanwen/go-fuse/v2/fs/loopback_unix.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
//go:build !freebsd

// Copyright 2024 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package fs

import (
	"context"
	"syscall"

	"golang.org/x/sys/unix"
)

var _ = (NodeListxattrer)((*LoopbackNode)(nil))

func (n *LoopbackNode) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno) {
	sz, err := unix.Llistxattr(n.path(), dest)
	return uint32(sz), ToErrno(err)
}
121
vendor/github.com/hanwen/go-fuse/v2/fs/mem.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"context"
	"sync"
	"syscall"

	"github.com/hanwen/go-fuse/v2/fuse"
)

// MemRegularFile is a filesystem node that holds a data
// slice in memory.
type MemRegularFile struct {
	Inode

	mu   sync.Mutex
	Data []byte
	Attr fuse.Attr
}

var _ = (NodeOpener)((*MemRegularFile)(nil))
var _ = (NodeReader)((*MemRegularFile)(nil))
var _ = (NodeWriter)((*MemRegularFile)(nil))
var _ = (NodeSetattrer)((*MemRegularFile)(nil))
var _ = (NodeFlusher)((*MemRegularFile)(nil))
var _ = (NodeAllocater)((*MemRegularFile)(nil))

func (f *MemRegularFile) Allocate(ctx context.Context, fh FileHandle, off uint64, size uint64, mode uint32) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	oldSz := len(f.Data)
	if uint64(cap(f.Data)) < off+size {
		n := make([]byte, off+size)
		copy(n, f.Data)
		f.Data = n
	}
	if keepSizeMode(mode) {
		f.Data = f.Data[:oldSz]
	} else if len(f.Data) < int(off+size) {
		f.Data = f.Data[:off+size]
	}
	return 0
}

func (f *MemRegularFile) Open(ctx context.Context, flags uint32) (fh FileHandle, fuseFlags uint32, errno syscall.Errno) {
	return nil, fuse.FOPEN_KEEP_CACHE, OK
}

func (f *MemRegularFile) Write(ctx context.Context, fh FileHandle, data []byte, off int64) (uint32, syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	end := int64(len(data)) + off
	if int64(len(f.Data)) < end {
		n := make([]byte, end)
		copy(n, f.Data)
		f.Data = n
	}

	copy(f.Data[off:off+int64(len(data))], data)

	return uint32(len(data)), 0
}

var _ = (NodeGetattrer)((*MemRegularFile)(nil))

func (f *MemRegularFile) Getattr(ctx context.Context, fh FileHandle, out *fuse.AttrOut) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	out.Attr = f.Attr
	out.Attr.Size = uint64(len(f.Data))
	return OK
}

func (f *MemRegularFile) Setattr(ctx context.Context, fh FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno {
	f.mu.Lock()
	defer f.mu.Unlock()
	if sz, ok := in.GetSize(); ok {
		f.Data = f.Data[:sz]
	}
	out.Attr = f.Attr
	out.Size = uint64(len(f.Data))
	return OK
}

func (f *MemRegularFile) Flush(ctx context.Context, fh FileHandle) syscall.Errno {
	return 0
}

func (f *MemRegularFile) Read(ctx context.Context, fh FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
	f.mu.Lock()
	defer f.mu.Unlock()
	end := int(off) + len(dest)
	if end > len(f.Data) {
		end = len(f.Data)
	}
	return fuse.ReadResultData(f.Data[off:end]), OK
}

// MemSymlink is an inode holding a symlink in memory.
type MemSymlink struct {
	Inode
	Attr fuse.Attr
	Data []byte
}

var _ = (NodeReadlinker)((*MemSymlink)(nil))

func (l *MemSymlink) Readlink(ctx context.Context) ([]byte, syscall.Errno) {
	return l.Data, OK
}

var _ = (NodeGetattrer)((*MemSymlink)(nil))

func (l *MemSymlink) Getattr(ctx context.Context, fh FileHandle, out *fuse.AttrOut) syscall.Errno {
	out.Attr = l.Attr
	return OK
}
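`MemRegularFile` above is a ready-made in-memory node: a virtual filesystem only fills in `Data` and `Attr` and attaches the node to the inode tree (attaching, e.g. via `NewPersistentInode` from an `OnAdd` callback in upstream go-fuse, is not part of this diff). A minimal sketch under those assumptions, with a made-up package and helper name:

```go
// Package memfsdemo is a hypothetical illustration, not part of the vendored code.
package memfsdemo

import (
	"github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"
)

// newStaticFile builds a read-only in-memory node with fixed content.
// It still has to be attached to a parent inode to become visible.
func newStaticFile(contents string) *fs.MemRegularFile {
	return &fs.MemRegularFile{
		Data: []byte(contents),
		Attr: fuse.Attr{Mode: 0444},
	}
}
```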
11
vendor/github.com/hanwen/go-fuse/v2/fs/mem_linux.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
// Copyright 2025 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import "golang.org/x/sys/unix"

func keepSizeMode(mode uint32) bool {
	return mode&unix.FALLOC_FL_KEEP_SIZE != 0
}
11
vendor/github.com/hanwen/go-fuse/v2/fs/mem_unix.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//go:build !linux

// Copyright 2025 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

func keepSizeMode(mode uint32) bool {
	return false
}
40
vendor/github.com/hanwen/go-fuse/v2/fs/mount.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package fs

import (
	"time"

	"github.com/hanwen/go-fuse/v2/fuse"
)

// Mount mounts the given NodeFS on the directory, and starts serving
// requests. This is a convenience wrapper around NewNodeFS and
// fuse.NewServer. If nil is given as options, default settings are
// applied, which are 1 second entry and attribute timeout.
func Mount(dir string, root InodeEmbedder, options *Options) (*fuse.Server, error) {
	if options == nil {
		oneSec := time.Second
		options = &Options{
			EntryTimeout: &oneSec,
			AttrTimeout:  &oneSec,
		}
	}

	rawFS := NewNodeFS(root, options)
	server, err := fuse.NewServer(rawFS, dir, &options.MountOptions)
	if err != nil {
		return nil, err
	}

	go server.Serve()
	if err := server.WaitMount(); err != nil {
		// we don't shutdown the serve loop. If the mount does
		// not succeed, the loop won't work and exit.
		return nil, err
	}

	return server, nil
}
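Taken together with `NewLoopbackRoot` from loopback.go above, `Mount` is all a caller needs to expose a directory over FUSE. A hedged usage sketch: the paths are made up, and `server.Wait()` blocking until the filesystem is unmounted is assumed to behave as in upstream go-fuse (only `NewLoopbackRoot` and `Mount` appear in this diff):

```go
package main

import (
	"log"

	"github.com/hanwen/go-fuse/v2/fs"
)

func main() {
	// Mirror /tmp/src at /tmp/mnt using the loopback node type shown above.
	root, err := fs.NewLoopbackRoot("/tmp/src")
	if err != nil {
		log.Fatal(err)
	}
	// Passing nil options applies the 1-second entry/attribute timeouts.
	server, err := fs.Mount("/tmp/mnt", root, nil)
	if err != nil {
		log.Fatal(err)
	}
	server.Wait() // block until the filesystem is unmounted
}
```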
Some files were not shown because too many files have changed in this diff.