init
commit 8ad61755e6
675 changed files with 266303 additions and 0 deletions
2  .gitignore  vendored  Normal file
@@ -0,0 +1,2 @@
.idea/
config.yml
BIN  Idun.jpg  Normal file
Binary file not shown. Size: 187 KiB
53  Readme.md  Normal file
@@ -0,0 +1,53 @@
# Idun

![Image of Idun](Idun.jpg)

A small CLI tool to clean up backups from different sources. Based on a set of rules, Idun deletes backup files that are older than the configured retention but keeps one per hour, per day, per week, or per month.

## Buckets

Based on its age, each backup is sorted into one bucket, and the rules of that bucket decide whether the backup is kept or deleted.

### Unlimit Bucket (config per Hour)

Keeps every backup.

You can configure this for the first X hours of a backup's lifetime. The number of hours is the **minimum** time for which Idun keeps all backups.

### Hourly Bucket (config per Hour)

Keeps one backup per hour. An hour runs from minute 0 to minute 59, and the newest backup in it is kept: if you make backups at XX:00, XX:15, XX:30, and XX:45, the backup from XX:45 is kept.

### Daily Bucket (config per Day)

Keeps one backup per day. A day runs from 00:00:00 UTC to 23:59:59 UTC, and again the newest backup is kept: the backup closest to 23:59:59 stays, and all other backups from that day are removed.

### Weekly Bucket (config per Week)

### Monthly Bucket (config per Month)

### Delete All

Deletes all backups in this bucket. It is used at the end of the plan to remove all backups that are really old (based on the rules above).
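For readers of the code, here is a minimal sketch of how a plan is built and applied with the functions from `pkg/buckets` and `pkg/storage` (the rule values and the backup path are made up; error handling is shortened to panics):

```go
package main

import (
	"fmt"
	"time"

	"idun/pkg/buckets"
	"idun/pkg/storage"
)

func main() {
	loc, _ := time.LoadLocation("Europe/Berlin")

	// Keep everything for 2 hours, then one backup per hour for 12 hours,
	// then one per day for 7 days; everything older falls into the final DeleteBucket.
	cfg := buckets.Config{Unlimit: 2, Hourly: 12, Daily: 7}

	// GenerateBuckets returns the buckets newest-first, ending with a DeleteBucket.
	plan, err := buckets.GenerateBuckets(time.Now(), cfg, loc)
	if err != nil {
		panic(err)
	}

	// List backups from a local folder (the path is made up).
	fs := storage.LocaleFileSystem{Config: map[string]string{"path": "/var/backups/abc2"}}
	files, err := fs.ListFiles()
	if err != nil {
		panic(err)
	}

	// Each file is added to the first bucket whose time range contains it.
	if err := buckets.InsertFilesInBuckets(&plan, files); err != nil {
		panic(err)
	}

	// Every bucket decides for itself which of its files may be removed.
	for _, b := range plan {
		toDelete, _ := b.GetFilesToDelete()
		for _, f := range toDelete {
			name, _ := f.GetName()
			fmt.Println("would delete", name)
		}
	}
}
```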
## Config
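The config file is not documented in this commit; the following is a minimal sketch of a `config.yml`, derived from the structs in `main.go`, `pkg/buckets/calculator.go`, and the SFTP driver (host, credentials, and retention numbers are made up):

```yaml
jobs:
  - name: abc2
    driver: sftp                      # the only driver wired up in main.go
    config:                           # passed as-is to the storage driver
      addr: backup.example.com:22     # made-up host
      username: backup
      password: secret
      path: /backups/abc2
    buckets:
      unlimit: 2                      # keep everything for the first 2 hours
      hourly: 12                      # then one backup per hour for 12 hours
      dayly: 7                        # then one per day (the YAML key is spelled "dayly" in the code)
      weekly: 4
      monthly: 6
```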
## Usage

### Show Plan

You can print the plan to check whether the config does what you expect:

```
idun plan -job abc2
```

The job argument is optional and can be used to show only the plan of one job, matched against the "name" field in the config.
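Example output for a job with two hourly buckets configured (timestamps are illustrative; the line format comes from each bucket's `ToString` method):

```
Plan for abc2
2023-09-27 15:00:00 +0000 UTC to 2023-09-27 15:18:03 +0000 UTC (Unlimit)
2023-09-27 14:00:00 +0000 UTC to 2023-09-27 14:59:59.999999999 +0000 UTC
2023-09-27 13:00:00 +0000 UTC to 2023-09-27 13:59:59.999999999 +0000 UTC
0001-01-01 00:00:00 +0000 UTC to 2023-09-27 12:59:59.999999999 +0000 UTC (Delete)
```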
### Dry-Run

Shows all files and whether each one would be kept or deleted, without deleting anything:

```
idun dry-run -job abc2
```
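Example output (file names are made up; each backup is printed with `Keep` or `Delete` depending on the bucket it landed in):

```
Keep  /backups/abc2/db-2023-09-27-1545.tar.gz
Delete  /backups/abc2/db-2023-09-27-1530.tar.gz
Keep  /backups/abc2/db-2023-09-26-2345.tar.gz
```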
### Execute

### Crongo run

## FAQ

### Why minimum Hours/Days/Months

If you configure Idun to keep one backup per hour for 12 hours, Idun makes sure there are hourly backups for at least 12 hours. But because the next bucket type works on whole days from 00:00:00 UTC to 23:59:59 UTC, there can be a gap between the end of the "Hourly Bucket" range and the beginning of the "Daily Bucket" range, and Idun does not delete the backup files in that gap. For example, if the plan is generated at 11:00, the 12 hourly buckets reach back to 23:00 of the previous day, and the gap-filling hourly buckets extend that to the previous midnight, so there can be up to 35 hours of hourly backups.
24  go.mod  Normal file
@@ -0,0 +1,24 @@
module idun

go 1.21

require (
	github.com/pkg/sftp v1.13.6
	github.com/rs/zerolog v1.31.0
	github.com/stretchr/testify v1.8.4
	github.com/urfave/cli/v2 v2.25.7
	golang.org/x/crypto v0.14.0
	gopkg.in/yaml.v3 v3.0.1
)

require (
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/kr/fs v0.1.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	golang.org/x/sys v0.13.0 // indirect
)
76
go.sum
Normal file
76
go.sum
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
|
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
|
||||||
|
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||||
|
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||||
|
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||||
|
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||||
|
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||||
|
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
|
||||||
|
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||||
|
github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A=
|
||||||
|
github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||||
|
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||||
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
|
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||||
|
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||||
|
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||||
|
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||||
|
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||||
|
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||||
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||||
|
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
|
||||||
|
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||||
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
|
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
|
||||||
|
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
|
||||||
|
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
195  main.go  Normal file
@@ -0,0 +1,195 @@
package main

import (
	"errors"
	"fmt"
	"github.com/rs/zerolog/log"
	"github.com/urfave/cli/v2"
	"gopkg.in/yaml.v3"
	buckets2 "idun/pkg/buckets"
	"idun/pkg/storage"
	"os"
	"time"
)

type config struct {
	Jobs []struct {
		Name    string            `yaml:"name"`
		Driver  string            `yaml:"driver"`
		Config  map[string]string `yaml:"config"`
		Buckets buckets2.Config   `yaml:"buckets"`
	} `yaml:"jobs"`
}

func main() {
	loc, _ := time.LoadLocation("Europe/Berlin")

	app := &cli.App{
		Commands: []*cli.Command{
			{
				Name:  "plan",
				Usage: "Show all Plans of Buckets",
				Flags: []cli.Flag{
					&cli.StringFlag{
						Name:  "job",
						Value: "",
						Usage: "filter for job name",
					},
					&cli.StringFlag{
						Name:  "config",
						Value: "config.yml",
						Usage: "path to config",
					},
				},
				Action: func(cCtx *cli.Context) error {
					configPath := cCtx.String("config")
					if configPath == "" {
						configPath = "config.yml"
					}

					config, err := readConfig(configPath)
					if err != nil {
						log.Fatal().Err(err).Msg("cant get config")
						return err
					}

					for _, job := range config.Jobs {
						if cCtx.String("job") != "" && cCtx.String("job") != job.Name {
							continue
						}
						buckets, err := buckets2.GenerateBuckets(time.Now().AddDate(0, 0, 5), job.Buckets, loc)
						if err != nil {
							log.Fatal().Err(err).Interface("job", job).Msg("cant create plan for bucket")
							return err
						}
						fmt.Println("Plan for " + job.Name)
						for _, b := range buckets {
							fmt.Println(b.ToString())
						}
					}
					return nil
				},
			},
			{
				Name:  "dry-run",
				Usage: "Show which file idun would delete",
				Flags: []cli.Flag{
					&cli.StringFlag{
						Name:  "job",
						Value: "",
						Usage: "filter for job name",
					},
					&cli.StringFlag{
						Name:  "config",
						Value: "config.yml",
						Usage: "path to config",
					},
				},
				Action: func(cCtx *cli.Context) error {
					configPath := cCtx.String("config")
					if configPath == "" {
						configPath = "config.yml"
					}

					config, err := readConfig(configPath)
					if err != nil {
						log.Fatal().Err(err).Msg("cant get config")
						return err
					}

					for _, job := range config.Jobs {
						if cCtx.String("job") != "" && cCtx.String("job") != job.Name {
							continue
						}
						log.Debug().Str("name", job.Name).Msg("Run Job")
						bucketList, err := buckets2.GenerateBuckets(time.Now().AddDate(0, 0, 5), job.Buckets, loc)
						if err != nil {
							log.Fatal().Err(err).Interface("job", job).Msg("cant create plan for bucket")
							return err
						}

						jobStorage, err := getFileSystem(job.Driver, job.Config)
						if err != nil {
							log.Fatal().Str("driver", job.Driver).Err(err).Msg("cant get driver")
							return err
						}

						files, err := jobStorage.ListFiles()
						if err != nil {
							log.Fatal().Err(err).Msg("cant get files")
							return err
						}

						log.Debug().Int("len", len(files)).Msg("got files from jobStorage")

						err = buckets2.InsertFilesInBuckets(&bucketList, files)
						if err != nil {
							log.Fatal().Err(err).Msg("cant insert files to buckets")
						}

						var allFilesToDeleted []storage.File

						for _, b := range bucketList {
							filesToDelete, err := b.GetFilesToDelete()
							if err != nil {
								log.Fatal().Err(err).Msg("cant get files to delete")
								return err
							}
							allFilesToDeleted = append(allFilesToDeleted, filesToDelete...)
						}

						for _, f := range files {
							name, _ := f.GetName()
							toDelete := contains(allFilesToDeleted, f)
							if toDelete {
								fmt.Println("Delete ", name)
							} else {
								fmt.Println("Keep ", name)
							}
						}
					}
					return nil
				},
			},
		},
	}

	if err := app.Run(os.Args); err != nil {
		log.Fatal().Err(err).Msg("cant finish")
	}
}

func contains(files []storage.File, file storage.File) bool {
	for _, f := range files {
		if f == file {
			return true
		}
	}
	return false
}

func getFileSystem(driver string, config map[string]string) (storage.Storage, error) {
	switch driver {
	case "sftp":
		sftp := storage.SFTP{Config: config}
		return sftp, nil
	}

	return nil, errors.New("driver not found")
}

func readConfig(path string) (config, error) {
	file, err := os.ReadFile(path)
	if err != nil {
		return config{}, err
	}

	c := config{}
	err = yaml.Unmarshal(file, &c)
	if err != nil {
		return config{}, err
	}

	return c, nil
}
137  pkg/buckets/buckets.go  Normal file
@@ -0,0 +1,137 @@
package buckets

import (
	"errors"
	"fmt"
	"idun/pkg/storage"
	"sort"
	"time"
)

type Bucket interface {
	AddFile(storage.File) error
	Execute() error
	ToString() string
	GetFilesToDelete() ([]storage.File, error)
}

type UnlimitBucket struct {
	start time.Time
	end   time.Time
}

func (u UnlimitBucket) AddFile(f storage.File) error {
	t, err := f.GetTime()
	if err != nil {
		return err
	}

	if t.Before(u.start) || t.After(u.end) {
		return storage.ErrFileNotInBucketTime
	}

	return nil
}

func (u UnlimitBucket) Execute() error {
	return nil
}

func (u UnlimitBucket) GetFilesToDelete() ([]storage.File, error) {
	return []storage.File{}, nil
}

func (u UnlimitBucket) ToString() string {
	return fmt.Sprintf("%v to %v (Unlimit)", u.start, u.end)
}

type DeleteBucket struct {
	start time.Time
	end   time.Time
	files []storage.File
}

func (d *DeleteBucket) AddFile(f storage.File) error {
	t, err := f.GetTime()
	if err != nil {
		return err
	}

	if t.Before(d.start) || t.After(d.end) {
		return storage.ErrFileNotInBucketTime
	}
	d.files = append(d.files, f)
	return nil
}

func (d DeleteBucket) Execute() error {
	return nil
}

func (d DeleteBucket) ToString() string {
	return fmt.Sprintf("%v to %v (Delete)", d.start, d.end)
}

func (d DeleteBucket) GetFilesToDelete() ([]storage.File, error) {
	return d.files, nil
}

type TimeBucket struct {
	files []storage.File
	start time.Time
	end   time.Time
}

func (b TimeBucket) ToString() string {
	return fmt.Sprintf("%v to %v", b.start, b.end)
}

func (b *TimeBucket) AddFile(f storage.File) error {
	t, err := f.GetTime()
	if err != nil {
		return err
	}

	if t.Before(b.start) || t.After(b.end) {
		return storage.ErrFileNotInBucketTime
	}
	b.files = append(b.files, f)
	return nil
}

func (b TimeBucket) Execute() error {
	if len(b.files) <= 1 {
		return nil
	}

	return nil
}

// GetFilesToDelete keeps the newest file in the bucket and marks all others for deletion.
func (b TimeBucket) GetFilesToDelete() ([]storage.File, error) {
	toDelete := []storage.File{}
	sort.SliceStable(b.files, func(i, j int) bool {
		t1, _ := b.files[i].GetTime()
		t2, _ := b.files[j].GetTime()
		return t1.After(t2)
	})

	for i, f := range b.files {
		if i == 0 {
			continue
		}
		toDelete = append(toDelete, f)
	}

	return toDelete, nil
}

func NewTimeBucket(start time.Time, end time.Time) (TimeBucket, error) {
	if start.After(end) {
		return TimeBucket{}, errors.New("start time must be before end time")
	}

	t := TimeBucket{
		start: start,
		end:   end,
	}

	return t, nil
}
182
pkg/buckets/buckets_test.go
Normal file
182
pkg/buckets/buckets_test.go
Normal file
|
@ -0,0 +1,182 @@
|
||||||
|
package buckets
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
_ "github.com/stretchr/testify/assert"
|
||||||
|
"idun/pkg/storage"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TestFile struct {
|
||||||
|
name string
|
||||||
|
time time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f TestFile) GetName() (string, error) {
|
||||||
|
return f.name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f TestFile) GetTime() (time.Time, error) {
|
||||||
|
return f.time, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnlimitBucket(t *testing.T) {
|
||||||
|
tt := []struct {
|
||||||
|
Name string
|
||||||
|
Files []TestFile
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Name: "No Files, No Deletion",
|
||||||
|
Files: []TestFile{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "One File, no deletion",
|
||||||
|
Files: []TestFile{
|
||||||
|
{
|
||||||
|
time: time.Date(2023, 9, 27, 15, 0, 0, 0, time.UTC),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ts := range tt {
|
||||||
|
t.Run(ts.Name, func(t *testing.T) {
|
||||||
|
bucket := UnlimitBucket{}
|
||||||
|
|
||||||
|
for _, f := range ts.Files {
|
||||||
|
err := bucket.AddFile(f)
|
||||||
|
assert.Nil(t, err, "should add file without error")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreateTimeBucket(t *testing.T) {
|
||||||
|
t1 := time.Date(2023, 9, 27, 15, 0, 0, 0, time.UTC)
|
||||||
|
t2 := t1.Add(1 * time.Hour)
|
||||||
|
|
||||||
|
_, err := NewTimeBucket(t1, t2)
|
||||||
|
assert.Nil(t, err, "should create time bucket without error")
|
||||||
|
|
||||||
|
_, err = NewTimeBucket(t2, t1)
|
||||||
|
assert.Equal(t, "start time must be before end time", err.Error(), "should return error if start is behind end time")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddFileToTimeBucket(t *testing.T) {
|
||||||
|
t1 := time.Date(2023, 9, 27, 15, 0, 0, 0, time.UTC)
|
||||||
|
t2 := t1.Add(1 * time.Hour)
|
||||||
|
|
||||||
|
b, err := NewTimeBucket(t1, t2)
|
||||||
|
assert.Nil(t, err, "should be able to create bucket")
|
||||||
|
tt := []struct {
|
||||||
|
Name string
|
||||||
|
File storage.File
|
||||||
|
ExpErr error
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Name: "File inside the Bucket Time",
|
||||||
|
File: TestFile{time: t1.Add(1 * time.Hour)},
|
||||||
|
ExpErr: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "File after the Bucket Time",
|
||||||
|
File: TestFile{time: t1.Add(12 * time.Hour)},
|
||||||
|
ExpErr: storage.ErrFileNotInBucketTime,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "File after the Bucket Time",
|
||||||
|
File: TestFile{time: t1.Add(-12 * time.Hour)},
|
||||||
|
ExpErr: storage.ErrFileNotInBucketTime,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ts := range tt {
|
||||||
|
t.Run(ts.Name, func(t *testing.T) {
|
||||||
|
err := b.AddFile(ts.File)
|
||||||
|
assert.Equal(t, ts.ExpErr, err, "error should match")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetFilesToDelete(t *testing.T) {
|
||||||
|
t1 := time.Date(2023, 9, 27, 15, 0, 0, 0, time.UTC)
|
||||||
|
t2 := t1.Add(1 * time.Hour)
|
||||||
|
|
||||||
|
tt := []struct {
|
||||||
|
Name string
|
||||||
|
Files []TestFile
|
||||||
|
ToDeleteFileNames []string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Name: "No Files, No Deletion",
|
||||||
|
Files: []TestFile{},
|
||||||
|
ToDeleteFileNames: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "One File, no deletion",
|
||||||
|
Files: []TestFile{
|
||||||
|
{
|
||||||
|
time: t1.Add(5 * time.Minute),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ToDeleteFileNames: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Two File, one deletion",
|
||||||
|
Files: []TestFile{
|
||||||
|
{
|
||||||
|
name: "abc",
|
||||||
|
time: t1.Add(5 * time.Minute),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "des",
|
||||||
|
time: t1.Add(15 * time.Minute),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ToDeleteFileNames: []string{"abc"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Three File, two deletion",
|
||||||
|
Files: []TestFile{
|
||||||
|
{
|
||||||
|
name: "abc",
|
||||||
|
time: t1.Add(5 * time.Minute),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "des",
|
||||||
|
time: t1.Add(15 * time.Minute),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "fff",
|
||||||
|
time: t1.Add(14 * time.Minute),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ToDeleteFileNames: []string{"fff", "abc"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ts := range tt {
|
||||||
|
t.Run(ts.Name, func(t *testing.T) {
|
||||||
|
bucket, err := NewTimeBucket(t1, t2)
|
||||||
|
assert.Nil(t, err, "should be able to create bucket")
|
||||||
|
|
||||||
|
for _, f := range ts.Files {
|
||||||
|
err := bucket.AddFile(f)
|
||||||
|
assert.Nil(t, err, "should add file without error")
|
||||||
|
}
|
||||||
|
|
||||||
|
toDelte, err := bucket.GetFilesToDelete()
|
||||||
|
assert.Nil(t, err, "should not return error")
|
||||||
|
|
||||||
|
filenames := []string{}
|
||||||
|
for _, f := range toDelte {
|
||||||
|
name, err := f.GetName()
|
||||||
|
assert.Nil(t, err, "should not return error")
|
||||||
|
filenames = append(filenames, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, ts.ToDeleteFileNames, filenames, "should return right list of files to delete")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
157  pkg/buckets/calculator.go  Normal file
@@ -0,0 +1,157 @@
package buckets

import (
	"errors"
	"idun/pkg/storage"
	"time"
)

type Config struct {
	Unlimit int `yaml:"unlimit"`
	Hourly  int `yaml:"hourly"`
	Daily   int `yaml:"dayly"`
	Weekly  int `yaml:"weekly"`
	Monthly int `yaml:"monthly"`
}

// InsertFilesInBuckets puts every file into the first bucket whose time range contains it.
func InsertFilesInBuckets(buckets *[]Bucket, files []storage.File) error {
	for _, f := range files {
		for _, b := range *buckets {
			err := b.AddFile(f)
			if err != nil {
				if errors.Is(err, storage.ErrFileNotInBucketTime) {
					continue
				}
				return err
			}
			break
		}
	}

	return nil
}

// GenerateBuckets builds the bucket plan from fromTime backwards, newest bucket first,
// and always ends with a DeleteBucket that covers everything older than the plan.
func GenerateBuckets(fromTime time.Time, config Config, location *time.Location) ([]Bucket, error) {
	end := fromTime
	plan := []Bucket{}

	// Unlimit bucket for the current hour
	untilStart := time.Date(fromTime.Year(), fromTime.Month(), fromTime.Day(), fromTime.Hour(), 0, 0, 0, location)
	unlimitBucket := UnlimitBucket{
		start: untilStart,
		end:   fromTime,
	}
	end = untilStart
	plan = append(plan, &unlimitBucket)

	// Todo: Make just one unlimit bucket for the complete time
	// Unlimit buckets
	for i := 0; i < config.Unlimit; i++ {
		unlimitBucket := UnlimitBucket{
			start: end.Add(-1 * time.Hour),
			end:   end.Add(-1),
		}
		end = end.Add(-1 * time.Hour)
		plan = append(plan, &unlimitBucket)
	}

	// Hourly buckets
	for i := 0; i < config.Hourly; i++ {
		timebucket := TimeBucket{
			start: end.Add(-1 * time.Hour),
			end:   end.Add(-1),
		}
		end = end.Add(-1 * time.Hour)
		plan = append(plan, &timebucket)
	}

	// Hourly buckets until midnight, if daily, weekly or monthly buckets are active
	if config.Daily > 0 || config.Weekly > 0 || config.Monthly > 0 {
		for {
			timebucket := TimeBucket{
				start: end.Add(-1 * time.Hour),
				end:   end.Add(-1),
			}
			end = end.Add(-1 * time.Hour)
			plan = append(plan, &timebucket)
			if end.Hour() == 0 {
				break
			}
		}
	}

	// Daily buckets
	for i := 0; i < config.Daily; i++ {
		timebucket := TimeBucket{
			start: end.Add(-24 * time.Hour),
			end:   end.Add(-1),
		}
		end = end.Add(-24 * time.Hour)
		plan = append(plan, &timebucket)
	}

	// Daily buckets until (including) Monday, if weekly or monthly buckets are active
	if config.Weekly > 0 || config.Monthly > 0 {
		for {
			timebucket := TimeBucket{
				start: end.Add(-24 * time.Hour),
				end:   end.Add(-1),
			}
			end = end.Add(-24 * time.Hour)
			plan = append(plan, &timebucket)
			if end.Weekday() == time.Monday {
				break
			}
		}
	}

	// Weekly buckets
	for i := 0; i < config.Weekly; i++ {
		timebucket := TimeBucket{
			start: end.Add(-24 * 7 * time.Hour),
			end:   end.Add(-1),
		}
		end = end.Add(-24 * 7 * time.Hour)
		plan = append(plan, &timebucket)
	}

	// Weekly buckets until the start of the month, if monthly buckets are active
	if config.Monthly > 0 {
		for {
			lastrun := false
			start := end.Add(-24 * 7 * time.Hour)
			if start.Month() != end.Month() {
				start = time.Date(end.Year(), end.Month(), 1, 0, 0, 0, 0, location)
				lastrun = true
			}
			timebucket := TimeBucket{
				start: start,
				end:   end.Add(-1),
			}
			end = start
			plan = append(plan, &timebucket)
			if lastrun {
				break
			}
		}
	}

	// Monthly buckets
	for i := 0; i < config.Monthly; i++ {
		timebucket := TimeBucket{
			start: end.AddDate(0, -1, 0),
			end:   end.Add(-1),
		}
		end = end.AddDate(0, -1, 0)
		plan = append(plan, &timebucket)
	}

	// Delete all backups older than the last bucket of the plan
	deleteBucket := DeleteBucket{
		start: time.Time{},
		end:   end.Add(-1),
	}

	plan = append(plan, &deleteBucket)

	return plan, nil
}
99
pkg/buckets/calculator_test.go
Normal file
99
pkg/buckets/calculator_test.go
Normal file
|
@ -0,0 +1,99 @@
|
||||||
|
package buckets
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"idun/pkg/storage"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCalculator(t *testing.T) {
|
||||||
|
tt := []struct {
|
||||||
|
Name string
|
||||||
|
Config Config
|
||||||
|
Plan []string
|
||||||
|
Start time.Time
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
Name: "empty config",
|
||||||
|
Config: Config{},
|
||||||
|
Plan: []string{"2023-09-27 15:00:00 +0000 UTC to 2023-09-27 15:18:03.000000004 +0000 UTC (Unlimit)", "0001-01-01 00:00:00 +0000 UTC to 2023-09-27 14:59:59.999999999 +0000 UTC (Delete)"},
|
||||||
|
Start: time.Date(2023, 9, 27, 15, 18, 3, 4, time.UTC),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "keep last 2 hour, delete everything else",
|
||||||
|
Config: Config{Unlimit: 2},
|
||||||
|
Plan: []string{"2023-09-27 15:00:00 +0000 UTC to 2023-09-27 15:18:03.000000004 +0000 UTC (Unlimit)", "2023-09-27 14:00:00 +0000 UTC to 2023-09-27 14:59:59.999999999 +0000 UTC (Unlimit)", "2023-09-27 13:00:00 +0000 UTC to 2023-09-27 13:59:59.999999999 +0000 UTC (Unlimit)", "0001-01-01 00:00:00 +0000 UTC to 2023-09-27 12:59:59.999999999 +0000 UTC (Delete)"},
|
||||||
|
Start: time.Date(2023, 9, 27, 15, 18, 3, 4, time.UTC),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "keep one for the last 2 hour, delete everything else",
|
||||||
|
Config: Config{Hourly: 2},
|
||||||
|
Plan: []string{"2023-09-27 15:00:00 +0000 UTC to 2023-09-27 15:18:03.000000004 +0000 UTC (Unlimit)", "2023-09-27 14:00:00 +0000 UTC to 2023-09-27 14:59:59.999999999 +0000 UTC", "2023-09-27 13:00:00 +0000 UTC to 2023-09-27 13:59:59.999999999 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC to 2023-09-27 12:59:59.999999999 +0000 UTC (Delete)"},
|
||||||
|
Start: time.Date(2023, 9, 27, 15, 18, 3, 4, time.UTC),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "keep one houre everything, one for the last 2 hour, delete everything else",
|
||||||
|
Config: Config{Unlimit: 1, Hourly: 2},
|
||||||
|
Plan: []string{"2023-09-27 15:00:00 +0000 UTC to 2023-09-27 15:18:03.000000004 +0000 UTC (Unlimit)", "2023-09-27 14:00:00 +0000 UTC to 2023-09-27 14:59:59.999999999 +0000 UTC (Unlimit)", "2023-09-27 13:00:00 +0000 UTC to 2023-09-27 13:59:59.999999999 +0000 UTC", "2023-09-27 12:00:00 +0000 UTC to 2023-09-27 12:59:59.999999999 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC to 2023-09-27 11:59:59.999999999 +0000 UTC (Delete)"},
|
||||||
|
Start: time.Date(2023, 9, 27, 15, 18, 3, 4, time.UTC),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "keep two daily",
|
||||||
|
Config: Config{Daily: 2},
|
||||||
|
Start: time.Date(2023, 9, 27, 2, 18, 3, 4, time.UTC),
|
||||||
|
Plan: []string{"2023-09-27 02:00:00 +0000 UTC to 2023-09-27 02:18:03.000000004 +0000 UTC (Unlimit)", "2023-09-27 01:00:00 +0000 UTC to 2023-09-27 01:59:59.999999999 +0000 UTC", "2023-09-27 00:00:00 +0000 UTC to 2023-09-27 00:59:59.999999999 +0000 UTC", "2023-09-26 00:00:00 +0000 UTC to 2023-09-26 23:59:59.999999999 +0000 UTC", "2023-09-25 00:00:00 +0000 UTC to 2023-09-25 23:59:59.999999999 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC to 2023-09-24 23:59:59.999999999 +0000 UTC (Delete)"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "keep two daily and one weekly",
|
||||||
|
Config: Config{Daily: 1, Weekly: 1},
|
||||||
|
Start: time.Date(2023, 9, 13, 2, 18, 3, 4, time.UTC),
|
||||||
|
Plan: []string{"2023-09-13 02:00:00 +0000 UTC to 2023-09-13 02:18:03.000000004 +0000 UTC (Unlimit)", "2023-09-13 01:00:00 +0000 UTC to 2023-09-13 01:59:59.999999999 +0000 UTC", "2023-09-13 00:00:00 +0000 UTC to 2023-09-13 00:59:59.999999999 +0000 UTC", "2023-09-12 00:00:00 +0000 UTC to 2023-09-12 23:59:59.999999999 +0000 UTC", "2023-09-11 00:00:00 +0000 UTC to 2023-09-11 23:59:59.999999999 +0000 UTC", "2023-09-04 00:00:00 +0000 UTC to 2023-09-10 23:59:59.999999999 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC to 2023-09-03 23:59:59.999999999 +0000 UTC (Delete)"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "keep ones for 2 month",
|
||||||
|
Config: Config{Monthly: 2},
|
||||||
|
Start: time.Date(2023, 9, 13, 2, 18, 3, 4, time.UTC),
|
||||||
|
Plan: []string{"2023-09-13 02:00:00 +0000 UTC to 2023-09-13 02:18:03.000000004 +0000 UTC (Unlimit)", "2023-09-13 01:00:00 +0000 UTC to 2023-09-13 01:59:59.999999999 +0000 UTC", "2023-09-13 00:00:00 +0000 UTC to 2023-09-13 00:59:59.999999999 +0000 UTC", "2023-09-12 00:00:00 +0000 UTC to 2023-09-12 23:59:59.999999999 +0000 UTC", "2023-09-11 00:00:00 +0000 UTC to 2023-09-11 23:59:59.999999999 +0000 UTC", "2023-09-04 00:00:00 +0000 UTC to 2023-09-10 23:59:59.999999999 +0000 UTC", "2023-09-01 00:00:00 +0000 UTC to 2023-09-03 23:59:59.999999999 +0000 UTC", "2023-08-01 00:00:00 +0000 UTC to 2023-08-31 23:59:59.999999999 +0000 UTC", "2023-07-01 00:00:00 +0000 UTC to 2023-07-31 23:59:59.999999999 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC to 2023-06-30 23:59:59.999999999 +0000 UTC (Delete)"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ts := range tt {
|
||||||
|
t.Run(ts.Name, func(t *testing.T) {
|
||||||
|
|
||||||
|
buckets, err := GenerateBuckets(ts.Start, ts.Config, time.UTC)
|
||||||
|
assert.Nil(t, err, "should return no error")
|
||||||
|
|
||||||
|
plan := []string{}
|
||||||
|
for _, b := range buckets {
|
||||||
|
plan = append(plan, b.ToString())
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, ts.Plan, plan, "should return right plan")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInsertFilesInBuckets(t *testing.T) {
|
||||||
|
bucketTime := time.Date(2023, 10, 11, 0, 0, 0, 0, time.UTC)
|
||||||
|
|
||||||
|
bucket1, _ := NewTimeBucket(bucketTime, bucketTime.Add(1*time.Hour))
|
||||||
|
bucket2, _ := NewTimeBucket(bucketTime.Add(1*time.Hour), bucketTime.Add(2*time.Hour))
|
||||||
|
|
||||||
|
buckets := []Bucket{&bucket1, &bucket2}
|
||||||
|
|
||||||
|
file1 := TestFile{time: bucketTime.Add(5 * time.Minute)}
|
||||||
|
file2 := TestFile{time: bucketTime.Add(65 * time.Minute)}
|
||||||
|
file3 := TestFile{time: bucketTime.Add(75 * time.Minute)}
|
||||||
|
|
||||||
|
files := []storage.File{file1, file2, file3}
|
||||||
|
|
||||||
|
err := InsertFilesInBuckets(&buckets, files)
|
||||||
|
assert.Nil(t, err, "should insert file in buckets without error")
|
||||||
|
|
||||||
|
bucket1DeleteFiles, _ := buckets[0].GetFilesToDelete()
|
||||||
|
assert.Equal(t, []storage.File{}, bucket1DeleteFiles)
|
||||||
|
|
||||||
|
bucket2DeleteFiles, _ := buckets[1].GetFilesToDelete()
|
||||||
|
assert.Equal(t, []storage.File{file2}, bucket2DeleteFiles)
|
||||||
|
}
|
7  pkg/storage/errors.go  Normal file
@@ -0,0 +1,7 @@
package storage

import "errors"

var (
	ErrFileNotInBucketTime = errors.New("File is not in the time of the Bucket")
)
26  pkg/storage/files.go  Normal file
@@ -0,0 +1,26 @@
package storage

import "time"

type File interface {
	GetName() (string, error)
	GetTime() (time.Time, error)
}

type GeneralFile struct {
	name string
	time time.Time
}

func (g GeneralFile) GetName() (string, error) {
	return g.name, nil
}

func (g GeneralFile) GetTime() (time.Time, error) {
	return g.time, nil
}

func GetEmptyFileList() []File {
	var files []File
	return files
}
43  pkg/storage/localefilesystem.go  Normal file
@@ -0,0 +1,43 @@
package storage

import (
	"io/ioutil"
	"os"
)

type LocaleFileSystem struct {
	Config map[string]string
}

// ListFiles returns all entries of the configured "path" as files, using their modification time.
func (l LocaleFileSystem) ListFiles() ([]File, error) {
	files, err := ioutil.ReadDir(l.Config["path"])
	if err != nil {
		return []File{}, err
	}

	var filelist []File

	for _, file := range files {
		g := GeneralFile{
			name: l.Config["path"] + "/" + file.Name(),
			time: file.ModTime(),
		}
		filelist = append(filelist, g)
	}

	return filelist, nil
}

func (l LocaleFileSystem) Delete(files []File) error {
	for _, f := range files {
		name, err := f.GetName()
		if err != nil {
			return err
		}
		err = os.Remove(name)
		if err != nil {
			return err
		}
	}
	return nil
}
73
pkg/storage/localefilesystem_test.go
Normal file
73
pkg/storage/localefilesystem_test.go
Normal file
|
@ -0,0 +1,73 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestLocaleFileSystemReadFromFolder(t *testing.T) {
|
||||||
|
dname, err := os.MkdirTemp("", "sampledir")
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
|
||||||
|
d1 := []byte("hello\ngo\n")
|
||||||
|
|
||||||
|
err = os.WriteFile(dname+"/file1", d1, 0644)
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
err = os.WriteFile(dname+"/file", d1, 0644)
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
|
||||||
|
config := make(map[string]string)
|
||||||
|
config["path"] = dname
|
||||||
|
|
||||||
|
lfs := LocaleFileSystem{Config: config}
|
||||||
|
files, err := lfs.ListFiles()
|
||||||
|
assert.Nil(t, err, "should get files without throwing error")
|
||||||
|
|
||||||
|
filelistFromLFS := []string{}
|
||||||
|
for _, f := range files {
|
||||||
|
name, err := f.GetName()
|
||||||
|
assert.Nil(t, err, "should not get error")
|
||||||
|
filelistFromLFS = append(filelistFromLFS, name)
|
||||||
|
}
|
||||||
|
|
||||||
|
fileListWanted := []string{dname + "/file", dname + "/file1"}
|
||||||
|
assert.Equal(t, fileListWanted, filelistFromLFS, "should get currect files")
|
||||||
|
|
||||||
|
os.RemoveAll(dname)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLocaleFileSystemDeleteFile(t *testing.T) {
|
||||||
|
dname, err := os.MkdirTemp("", "sampledir")
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
|
||||||
|
d1 := []byte("hello\ngo\n")
|
||||||
|
|
||||||
|
err = os.WriteFile(dname+"/file1", d1, 0644)
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
|
||||||
|
f := GeneralFile{
|
||||||
|
name: dname + "/file1",
|
||||||
|
time: time.Time{},
|
||||||
|
}
|
||||||
|
FileToDelete := []File{f}
|
||||||
|
|
||||||
|
config := make(map[string]string)
|
||||||
|
config["path"] = dname
|
||||||
|
|
||||||
|
lfs := LocaleFileSystem{Config: config}
|
||||||
|
|
||||||
|
files, err := lfs.ListFiles()
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
assert.Equal(t, 1, len(files), "should found one file")
|
||||||
|
|
||||||
|
err = lfs.Delete(FileToDelete)
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
|
||||||
|
files, err = lfs.ListFiles()
|
||||||
|
assert.Nil(t, err, "should not throw error")
|
||||||
|
assert.Equal(t, 0, len(files), "should found zero file")
|
||||||
|
|
||||||
|
os.RemoveAll(dname)
|
||||||
|
}
|
72  pkg/storage/sftp.go  Normal file
@@ -0,0 +1,72 @@
package storage

import (
	"errors"
	"fmt"
	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
	"time"
)

type SFTP struct {
	Config map[string]string
}

// ListFiles connects to the configured SFTP server and returns all regular files in "path".
func (f SFTP) ListFiles() ([]File, error) {
	var auths []ssh.AuthMethod
	auths = append(auths, ssh.Password(f.Config["password"]))

	config := ssh.ClientConfig{
		User: f.Config["username"],
		Auth: auths,
		// Auth: []ssh.AuthMethod{
		// 	ssh.KeyboardInteractive(SshInteractive),
		// },

		// The host key check is skipped (insecure).
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		// HostKeyCallback: ssh.FixedHostKey(hostKey),
		// HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
		// 	return nil
		// },
		Timeout: 30 * time.Second,
	}

	conn, err := ssh.Dial("tcp", f.Config["addr"], &config)
	if err != nil {
		return nil, err
	}

	defer conn.Close()

	sc, err := sftp.NewClient(conn)
	if err != nil {
		return nil, err
	}

	defer sc.Close()

	files, err := sc.ReadDir(f.Config["path"])
	if err != nil {
		return nil, err
	}

	fileList := []File{}

	for _, file := range files {
		if file.IsDir() {
			continue
		}
		generateFile := GeneralFile{
			name: fmt.Sprintf("%v/%v", f.Config["path"], file.Name()),
			time: file.ModTime(),
		}
		fileList = append(fileList, generateFile)
	}

	return fileList, nil
}

func (f SFTP) Delete(files []File) error {
	return errors.New("Not implemented")
}
6  pkg/storage/storege.go  Normal file
@@ -0,0 +1,6 @@
package storage

type Storage interface {
	ListFiles() ([]File, error)
	Delete([]File) error
}
21
vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
generated
vendored
Normal file
21
vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Brian Goff
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
14
vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
generated
vendored
Normal file
14
vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
package md2man
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/russross/blackfriday/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Render converts a markdown document into a roff formatted document.
|
||||||
|
func Render(doc []byte) []byte {
|
||||||
|
renderer := NewRoffRenderer()
|
||||||
|
|
||||||
|
return blackfriday.Run(doc,
|
||||||
|
[]blackfriday.Option{blackfriday.WithRenderer(renderer),
|
||||||
|
blackfriday.WithExtensions(renderer.GetExtensions())}...)
|
||||||
|
}
|
336
vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
generated
vendored
Normal file
336
vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
generated
vendored
Normal file
|
@ -0,0 +1,336 @@
|
||||||
|
package md2man
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/russross/blackfriday/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// roffRenderer implements the blackfriday.Renderer interface for creating
|
||||||
|
// roff format (manpages) from markdown text
|
||||||
|
type roffRenderer struct {
|
||||||
|
extensions blackfriday.Extensions
|
||||||
|
listCounters []int
|
||||||
|
firstHeader bool
|
||||||
|
firstDD bool
|
||||||
|
listDepth int
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
titleHeader = ".TH "
|
||||||
|
topLevelHeader = "\n\n.SH "
|
||||||
|
secondLevelHdr = "\n.SH "
|
||||||
|
otherHeader = "\n.SS "
|
||||||
|
crTag = "\n"
|
||||||
|
emphTag = "\\fI"
|
||||||
|
emphCloseTag = "\\fP"
|
||||||
|
strongTag = "\\fB"
|
||||||
|
strongCloseTag = "\\fP"
|
||||||
|
breakTag = "\n.br\n"
|
||||||
|
paraTag = "\n.PP\n"
|
||||||
|
hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
|
||||||
|
linkTag = "\n\\[la]"
|
||||||
|
linkCloseTag = "\\[ra]"
|
||||||
|
codespanTag = "\\fB\\fC"
|
||||||
|
codespanCloseTag = "\\fR"
|
||||||
|
codeTag = "\n.PP\n.RS\n\n.nf\n"
|
||||||
|
codeCloseTag = "\n.fi\n.RE\n"
|
||||||
|
quoteTag = "\n.PP\n.RS\n"
|
||||||
|
quoteCloseTag = "\n.RE\n"
|
||||||
|
listTag = "\n.RS\n"
|
||||||
|
listCloseTag = "\n.RE\n"
|
||||||
|
dtTag = "\n.TP\n"
|
||||||
|
dd2Tag = "\n"
|
||||||
|
tableStart = "\n.TS\nallbox;\n"
|
||||||
|
tableEnd = ".TE\n"
|
||||||
|
tableCellStart = "T{\n"
|
||||||
|
tableCellEnd = "\nT}\n"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||||||
|
// from markdown
|
||||||
|
func NewRoffRenderer() *roffRenderer { // nolint: golint
|
||||||
|
var extensions blackfriday.Extensions
|
||||||
|
|
||||||
|
extensions |= blackfriday.NoIntraEmphasis
|
||||||
|
extensions |= blackfriday.Tables
|
||||||
|
extensions |= blackfriday.FencedCode
|
||||||
|
extensions |= blackfriday.SpaceHeadings
|
||||||
|
extensions |= blackfriday.Footnotes
|
||||||
|
extensions |= blackfriday.Titleblock
|
||||||
|
extensions |= blackfriday.DefinitionLists
|
||||||
|
return &roffRenderer{
|
||||||
|
extensions: extensions,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetExtensions returns the list of extensions used by this renderer implementation
|
||||||
|
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
|
||||||
|
return r.extensions
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderHeader handles outputting the header at document start
|
||||||
|
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
|
||||||
|
// disable hyphenation
|
||||||
|
out(w, ".nh\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderFooter handles outputting the footer at the document end; the roff
|
||||||
|
// renderer has no footer information
|
||||||
|
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
|
||||||
|
}
|
||||||
|
|
||||||
|
// RenderNode is called for each node in a markdown document; based on the node
|
||||||
|
// type the equivalent roff output is sent to the writer
|
||||||
|
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||||
|
|
||||||
|
var walkAction = blackfriday.GoToNext
|
||||||
|
|
||||||
|
switch node.Type {
|
||||||
|
case blackfriday.Text:
|
||||||
|
escapeSpecialChars(w, node.Literal)
|
||||||
|
case blackfriday.Softbreak:
|
||||||
|
out(w, crTag)
|
||||||
|
case blackfriday.Hardbreak:
|
||||||
|
out(w, breakTag)
|
||||||
|
case blackfriday.Emph:
|
||||||
|
if entering {
|
||||||
|
out(w, emphTag)
|
||||||
|
} else {
|
||||||
|
out(w, emphCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Strong:
|
||||||
|
if entering {
|
||||||
|
out(w, strongTag)
|
||||||
|
} else {
|
||||||
|
out(w, strongCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Link:
|
||||||
|
if !entering {
|
||||||
|
out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Image:
|
||||||
|
// ignore images
|
||||||
|
walkAction = blackfriday.SkipChildren
|
||||||
|
case blackfriday.Code:
|
||||||
|
out(w, codespanTag)
|
||||||
|
escapeSpecialChars(w, node.Literal)
|
||||||
|
out(w, codespanCloseTag)
|
||||||
|
case blackfriday.Document:
|
||||||
|
break
|
||||||
|
case blackfriday.Paragraph:
|
||||||
|
// roff .PP markers break lists
|
||||||
|
if r.listDepth > 0 {
|
||||||
|
return blackfriday.GoToNext
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
out(w, paraTag)
|
||||||
|
} else {
|
||||||
|
out(w, crTag)
|
||||||
|
}
|
||||||
|
case blackfriday.BlockQuote:
|
||||||
|
if entering {
|
||||||
|
out(w, quoteTag)
|
||||||
|
} else {
|
||||||
|
out(w, quoteCloseTag)
|
||||||
|
}
|
||||||
|
case blackfriday.Heading:
|
||||||
|
r.handleHeading(w, node, entering)
|
||||||
|
case blackfriday.HorizontalRule:
|
||||||
|
out(w, hruleTag)
|
||||||
|
case blackfriday.List:
|
||||||
|
r.handleList(w, node, entering)
|
||||||
|
case blackfriday.Item:
|
||||||
|
r.handleItem(w, node, entering)
|
||||||
|
case blackfriday.CodeBlock:
|
||||||
|
out(w, codeTag)
|
||||||
|
escapeSpecialChars(w, node.Literal)
|
||||||
|
out(w, codeCloseTag)
|
||||||
|
case blackfriday.Table:
|
||||||
|
r.handleTable(w, node, entering)
|
||||||
|
case blackfriday.TableHead:
|
||||||
|
case blackfriday.TableBody:
|
||||||
|
case blackfriday.TableRow:
|
||||||
|
// no action as cell entries do all the nroff formatting
|
||||||
|
return blackfriday.GoToNext
|
||||||
|
case blackfriday.TableCell:
|
||||||
|
r.handleTableCell(w, node, entering)
|
||||||
|
case blackfriday.HTMLSpan:
|
||||||
|
// ignore other HTML tags
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||||
|
}
|
||||||
|
return walkAction
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
if entering {
|
||||||
|
switch node.Level {
|
||||||
|
case 1:
|
||||||
|
if !r.firstHeader {
|
||||||
|
out(w, titleHeader)
|
||||||
|
r.firstHeader = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
out(w, topLevelHeader)
|
||||||
|
case 2:
|
||||||
|
out(w, secondLevelHdr)
|
||||||
|
default:
|
||||||
|
out(w, otherHeader)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
openTag := listTag
|
||||||
|
closeTag := listCloseTag
|
||||||
|
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||||
|
// tags for definition lists handled within Item node
|
||||||
|
openTag = ""
|
||||||
|
closeTag = ""
|
||||||
|
}
|
||||||
|
if entering {
|
||||||
|
r.listDepth++
|
||||||
|
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||||
|
r.listCounters = append(r.listCounters, 1)
|
||||||
|
}
|
||||||
|
out(w, openTag)
|
||||||
|
} else {
|
||||||
|
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||||
|
r.listCounters = r.listCounters[:len(r.listCounters)-1]
|
||||||
|
}
|
||||||
|
out(w, closeTag)
|
||||||
|
r.listDepth--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
if entering {
|
||||||
|
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||||
|
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
|
||||||
|
r.listCounters[len(r.listCounters)-1]++
|
||||||
|
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
|
||||||
|
// DT (definition term): line just before DD (see below).
|
||||||
|
out(w, dtTag)
|
||||||
|
r.firstDD = true
|
||||||
|
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||||
|
// DD (definition description): line that starts with ": ".
|
||||||
|
//
|
||||||
|
// We have to distinguish between the first DD and the
|
||||||
|
// subsequent ones, as there should be no vertical
|
||||||
|
// whitespace between the DT and the first DD.
|
||||||
|
if r.firstDD {
|
||||||
|
r.firstDD = false
|
||||||
|
} else {
|
||||||
|
out(w, dd2Tag)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
out(w, ".IP \\(bu 2\n")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
out(w, "\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
if entering {
|
||||||
|
out(w, tableStart)
|
||||||
|
// call walker to count cells (and rows?) so format section can be produced
|
||||||
|
columns := countColumns(node)
|
||||||
|
out(w, strings.Repeat("l ", columns)+"\n")
|
||||||
|
out(w, strings.Repeat("l ", columns)+".\n")
|
||||||
|
} else {
|
||||||
|
out(w, tableEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||||
|
if entering {
|
||||||
|
var start string
|
||||||
|
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
|
||||||
|
start = "\t"
|
||||||
|
}
|
||||||
|
if node.IsHeader {
|
||||||
|
start += codespanTag
|
||||||
|
} else if nodeLiteralSize(node) > 30 {
|
||||||
|
start += tableCellStart
|
||||||
|
}
|
||||||
|
out(w, start)
|
||||||
|
} else {
|
||||||
|
var end string
|
||||||
|
if node.IsHeader {
|
||||||
|
end = codespanCloseTag
|
||||||
|
} else if nodeLiteralSize(node) > 30 {
|
||||||
|
end = tableCellEnd
|
||||||
|
}
|
||||||
|
if node.Next == nil && end != tableCellEnd {
|
||||||
|
// Last cell: need to carriage return if we are at the end of the
|
||||||
|
// header row and content isn't wrapped in a "tablecell"
|
||||||
|
end += crTag
|
||||||
|
}
|
||||||
|
out(w, end)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func nodeLiteralSize(node *blackfriday.Node) int {
|
||||||
|
total := 0
|
||||||
|
for n := node.FirstChild; n != nil; n = n.FirstChild {
|
||||||
|
total += len(n.Literal)
|
||||||
|
}
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
// because roff format requires knowing the column count before outputting any table
|
||||||
|
// data we need to walk a table tree and count the columns
|
||||||
|
func countColumns(node *blackfriday.Node) int {
|
||||||
|
var columns int
|
||||||
|
|
||||||
|
node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||||
|
switch node.Type {
|
||||||
|
case blackfriday.TableRow:
|
||||||
|
if !entering {
|
||||||
|
return blackfriday.Terminate
|
||||||
|
}
|
||||||
|
case blackfriday.TableCell:
|
||||||
|
if entering {
|
||||||
|
columns++
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return blackfriday.GoToNext
|
||||||
|
})
|
||||||
|
return columns
|
||||||
|
}
|
||||||
|
|
||||||
|
func out(w io.Writer, output string) {
|
||||||
|
io.WriteString(w, output) // nolint: errcheck
|
||||||
|
}
|
||||||
|
|
||||||
|
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||||
|
for i := 0; i < len(text); i++ {
|
||||||
|
// escape initial apostrophe or period
|
||||||
|
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
|
||||||
|
out(w, "\\&")
|
||||||
|
}
|
||||||
|
|
||||||
|
// directly copy normal characters
|
||||||
|
org := i
|
||||||
|
|
||||||
|
for i < len(text) && text[i] != '\\' {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
if i > org {
|
||||||
|
w.Write(text[org:i]) // nolint: errcheck
|
||||||
|
}
|
||||||
|
|
||||||
|
// escape a character
|
||||||
|
if i >= len(text) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
|
||||||
|
}
|
||||||
|
}
|
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
ISC License
|
||||||
|
|
||||||
|
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||||
|
|
||||||
|
Permission to use, copy, modify, and/or distribute this software for any
|
||||||
|
purpose with or without fee is hereby granted, provided that the above
|
||||||
|
copyright notice and this permission notice appear in all copies.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
||||||
|
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||||
|
//
|
||||||
|
// Permission to use, copy, modify, and distribute this software for any
|
||||||
|
// purpose with or without fee is hereby granted, provided that the above
|
||||||
|
// copyright notice and this permission notice appear in all copies.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
|
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||||
|
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||||
|
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||||
|
// tag is deprecated and thus should not be used.
|
||||||
|
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||||
|
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||||
|
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||||
|
// not access to the unsafe package is available.
|
||||||
|
UnsafeDisabled = false
|
||||||
|
|
||||||
|
// ptrSize is the size of a pointer on the current arch.
|
||||||
|
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||||
|
)
|
||||||
|
|
||||||
|
type flag uintptr
|
||||||
|
|
||||||
|
var (
|
||||||
|
// flagRO indicates whether the value field of a reflect.Value
|
||||||
|
// is read-only.
|
||||||
|
flagRO flag
|
||||||
|
|
||||||
|
// flagAddr indicates whether the address of the reflect.Value's
|
||||||
|
// value may be taken.
|
||||||
|
flagAddr flag
|
||||||
|
)
|
||||||
|
|
||||||
|
// flagKindMask holds the bits that make up the kind
|
||||||
|
// part of the flags field. In all the supported versions,
|
||||||
|
// it is in the lower 5 bits.
|
||||||
|
const flagKindMask = flag(0x1f)
|
||||||
|
|
||||||
|
// Different versions of Go have used different
|
||||||
|
// bit layouts for the flags type. This table
|
||||||
|
// records the known combinations.
|
||||||
|
var okFlags = []struct {
|
||||||
|
ro, addr flag
|
||||||
|
}{{
|
||||||
|
// From Go 1.4 to 1.5
|
||||||
|
ro: 1 << 5,
|
||||||
|
addr: 1 << 7,
|
||||||
|
}, {
|
||||||
|
// Up to Go tip.
|
||||||
|
ro: 1<<5 | 1<<6,
|
||||||
|
addr: 1 << 8,
|
||||||
|
}}
|
||||||
|
|
||||||
|
var flagValOffset = func() uintptr {
|
||||||
|
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||||
|
if !ok {
|
||||||
|
panic("reflect.Value has no flag field")
|
||||||
|
}
|
||||||
|
return field.Offset
|
||||||
|
}()
|
||||||
|
|
||||||
|
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||||
|
func flagField(v *reflect.Value) *flag {
|
||||||
|
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||||
|
}
|
||||||
|
|
||||||
|
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||||
|
// the typical safety restrictions preventing access to unaddressable and
|
||||||
|
// unexported data. It works by digging the raw pointer to the underlying
|
||||||
|
// value out of the protected value and generating a new unprotected (unsafe)
|
||||||
|
// reflect.Value to it.
|
||||||
|
//
|
||||||
|
// This allows us to check for implementations of the Stringer and error
|
||||||
|
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||||
|
// inaccessible values such as unexported struct fields.
|
||||||
|
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||||
|
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
flagFieldPtr := flagField(&v)
|
||||||
|
*flagFieldPtr &^= flagRO
|
||||||
|
*flagFieldPtr |= flagAddr
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanity checks against future reflect package changes
|
||||||
|
// to the type or semantics of the Value.flag field.
|
||||||
|
func init() {
|
||||||
|
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||||
|
if !ok {
|
||||||
|
panic("reflect.Value has no flag field")
|
||||||
|
}
|
||||||
|
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||||
|
panic("reflect.Value flag field has changed kind")
|
||||||
|
}
|
||||||
|
type t0 int
|
||||||
|
var t struct {
|
||||||
|
A t0
|
||||||
|
// t0 will have flagEmbedRO set.
|
||||||
|
t0
|
||||||
|
// a will have flagStickyRO set
|
||||||
|
a t0
|
||||||
|
}
|
||||||
|
vA := reflect.ValueOf(t).FieldByName("A")
|
||||||
|
va := reflect.ValueOf(t).FieldByName("a")
|
||||||
|
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||||
|
|
||||||
|
// Infer flagRO from the difference between the flags
|
||||||
|
// for the (otherwise identical) fields in t.
|
||||||
|
flagPublic := *flagField(&vA)
|
||||||
|
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||||
|
flagRO = flagPublic ^ flagWithRO
|
||||||
|
|
||||||
|
// Infer flagAddr from the difference between a value
|
||||||
|
// taken from a pointer and not.
|
||||||
|
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||||
|
flagNoPtr := *flagField(&vA)
|
||||||
|
flagPtr := *flagField(&vPtrA)
|
||||||
|
flagAddr = flagNoPtr ^ flagPtr
|
||||||
|
|
||||||
|
// Check that the inferred flags tally with one of the known versions.
|
||||||
|
for _, f := range okFlags {
|
||||||
|
if flagRO == f.ro && flagAddr == f.addr {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("reflect.Value read-only flag has changed semantics")
|
||||||
|
}
|
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
||||||
|
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||||
|
//
|
||||||
|
// Permission to use, copy, modify, and distribute this software for any
|
||||||
|
// purpose with or without fee is hereby granted, provided that the above
|
||||||
|
// copyright notice and this permission notice appear in all copies.
|
||||||
|
//
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
|
||||||
|
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||||
|
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||||
|
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||||
|
// tag is deprecated and thus should not be used.
|
||||||
|
// +build js appengine safe disableunsafe !go1.4
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import "reflect"
|
||||||
|
|
||||||
|
const (
|
||||||
|
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||||
|
// not access to the unsafe package is available.
|
||||||
|
UnsafeDisabled = true
|
||||||
|
)
|
||||||
|
|
||||||
|
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||||
|
// that bypasses the typical safety restrictions preventing access to
|
||||||
|
// unaddressable and unexported data. However, doing this relies on access to
|
||||||
|
// the unsafe package. This is a stub version which simply returns the passed
|
||||||
|
// reflect.Value when the unsafe package is not available.
|
||||||
|
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||||
|
return v
|
||||||
|
}
|
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
|
@ -0,0 +1,341 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||||
|
// the technique used in the fmt package.
|
||||||
|
var (
|
||||||
|
panicBytes = []byte("(PANIC=")
|
||||||
|
plusBytes = []byte("+")
|
||||||
|
iBytes = []byte("i")
|
||||||
|
trueBytes = []byte("true")
|
||||||
|
falseBytes = []byte("false")
|
||||||
|
interfaceBytes = []byte("(interface {})")
|
||||||
|
commaNewlineBytes = []byte(",\n")
|
||||||
|
newlineBytes = []byte("\n")
|
||||||
|
openBraceBytes = []byte("{")
|
||||||
|
openBraceNewlineBytes = []byte("{\n")
|
||||||
|
closeBraceBytes = []byte("}")
|
||||||
|
asteriskBytes = []byte("*")
|
||||||
|
colonBytes = []byte(":")
|
||||||
|
colonSpaceBytes = []byte(": ")
|
||||||
|
openParenBytes = []byte("(")
|
||||||
|
closeParenBytes = []byte(")")
|
||||||
|
spaceBytes = []byte(" ")
|
||||||
|
pointerChainBytes = []byte("->")
|
||||||
|
nilAngleBytes = []byte("<nil>")
|
||||||
|
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||||
|
maxShortBytes = []byte("<max>")
|
||||||
|
circularBytes = []byte("<already shown>")
|
||||||
|
circularShortBytes = []byte("<shown>")
|
||||||
|
invalidAngleBytes = []byte("<invalid>")
|
||||||
|
openBracketBytes = []byte("[")
|
||||||
|
closeBracketBytes = []byte("]")
|
||||||
|
percentBytes = []byte("%")
|
||||||
|
precisionBytes = []byte(".")
|
||||||
|
openAngleBytes = []byte("<")
|
||||||
|
closeAngleBytes = []byte(">")
|
||||||
|
openMapBytes = []byte("map[")
|
||||||
|
closeMapBytes = []byte("]")
|
||||||
|
lenEqualsBytes = []byte("len=")
|
||||||
|
capEqualsBytes = []byte("cap=")
|
||||||
|
)
|
||||||
|
|
||||||
|
// hexDigits is used to map a decimal value to a hex digit.
|
||||||
|
var hexDigits = "0123456789abcdef"
|
||||||
|
|
||||||
|
// catchPanic handles any panics that might occur during the handleMethods
|
||||||
|
// calls.
|
||||||
|
func catchPanic(w io.Writer, v reflect.Value) {
|
||||||
|
if err := recover(); err != nil {
|
||||||
|
w.Write(panicBytes)
|
||||||
|
fmt.Fprintf(w, "%v", err)
|
||||||
|
w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleMethods attempts to call the Error and String methods on the underlying
|
||||||
|
// type the passed reflect.Value represents and outputes the result to Writer w.
|
||||||
|
//
|
||||||
|
// It handles panics in any called methods by catching and displaying the error
|
||||||
|
// as the formatted value.
|
||||||
|
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||||
|
// We need an interface to check if the type implements the error or
|
||||||
|
// Stringer interface. However, the reflect package won't give us an
|
||||||
|
// interface on certain things like unexported struct fields in order
|
||||||
|
// to enforce visibility rules. We use unsafe, when it's available,
|
||||||
|
// to bypass these restrictions since this package does not mutate the
|
||||||
|
// values.
|
||||||
|
if !v.CanInterface() {
|
||||||
|
if UnsafeDisabled {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
v = unsafeReflectValue(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Choose whether or not to do error and Stringer interface lookups against
|
||||||
|
// the base type or a pointer to the base type depending on settings.
|
||||||
|
// Technically calling one of these methods with a pointer receiver can
|
||||||
|
// mutate the value, however, types which choose to satisify an error or
|
||||||
|
// Stringer interface with a pointer receiver should not be mutating their
|
||||||
|
// state inside these interface methods.
|
||||||
|
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||||
|
v = unsafeReflectValue(v)
|
||||||
|
}
|
||||||
|
if v.CanAddr() {
|
||||||
|
v = v.Addr()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is it an error or Stringer?
|
||||||
|
switch iface := v.Interface().(type) {
|
||||||
|
case error:
|
||||||
|
defer catchPanic(w, v)
|
||||||
|
if cs.ContinueOnMethod {
|
||||||
|
w.Write(openParenBytes)
|
||||||
|
w.Write([]byte(iface.Error()))
|
||||||
|
w.Write(closeParenBytes)
|
||||||
|
w.Write(spaceBytes)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Write([]byte(iface.Error()))
|
||||||
|
return true
|
||||||
|
|
||||||
|
case fmt.Stringer:
|
||||||
|
defer catchPanic(w, v)
|
||||||
|
if cs.ContinueOnMethod {
|
||||||
|
w.Write(openParenBytes)
|
||||||
|
w.Write([]byte(iface.String()))
|
||||||
|
w.Write(closeParenBytes)
|
||||||
|
w.Write(spaceBytes)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
w.Write([]byte(iface.String()))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// printBool outputs a boolean value as true or false to Writer w.
|
||||||
|
func printBool(w io.Writer, val bool) {
|
||||||
|
if val {
|
||||||
|
w.Write(trueBytes)
|
||||||
|
} else {
|
||||||
|
w.Write(falseBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// printInt outputs a signed integer value to Writer w.
|
||||||
|
func printInt(w io.Writer, val int64, base int) {
|
||||||
|
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// printUint outputs an unsigned integer value to Writer w.
|
||||||
|
func printUint(w io.Writer, val uint64, base int) {
|
||||||
|
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// printFloat outputs a floating point value using the specified precision,
|
||||||
|
// which is expected to be 32 or 64bit, to Writer w.
|
||||||
|
func printFloat(w io.Writer, val float64, precision int) {
|
||||||
|
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// printComplex outputs a complex value using the specified float precision
|
||||||
|
// for the real and imaginary parts to Writer w.
|
||||||
|
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||||
|
r := real(c)
|
||||||
|
w.Write(openParenBytes)
|
||||||
|
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||||
|
i := imag(c)
|
||||||
|
if i >= 0 {
|
||||||
|
w.Write(plusBytes)
|
||||||
|
}
|
||||||
|
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||||
|
w.Write(iBytes)
|
||||||
|
w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||||
|
// prefix to Writer w.
|
||||||
|
func printHexPtr(w io.Writer, p uintptr) {
|
||||||
|
// Null pointer.
|
||||||
|
num := uint64(p)
|
||||||
|
if num == 0 {
|
||||||
|
w.Write(nilAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||||
|
buf := make([]byte, 18)
|
||||||
|
|
||||||
|
// It's simpler to construct the hex string right to left.
|
||||||
|
base := uint64(16)
|
||||||
|
i := len(buf) - 1
|
||||||
|
for num >= base {
|
||||||
|
buf[i] = hexDigits[num%base]
|
||||||
|
num /= base
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
buf[i] = hexDigits[num]
|
||||||
|
|
||||||
|
// Add '0x' prefix.
|
||||||
|
i--
|
||||||
|
buf[i] = 'x'
|
||||||
|
i--
|
||||||
|
buf[i] = '0'
|
||||||
|
|
||||||
|
// Strip unused leading bytes.
|
||||||
|
buf = buf[i:]
|
||||||
|
w.Write(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||||
|
// elements to be sorted.
|
||||||
|
type valuesSorter struct {
|
||||||
|
values []reflect.Value
|
||||||
|
strings []string // either nil or same len and values
|
||||||
|
cs *ConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||||
|
// surrogate keys on which the data should be sorted. It uses flags in
|
||||||
|
// ConfigState to decide if and how to populate those surrogate keys.
|
||||||
|
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||||
|
vs := &valuesSorter{values: values, cs: cs}
|
||||||
|
if canSortSimply(vs.values[0].Kind()) {
|
||||||
|
return vs
|
||||||
|
}
|
||||||
|
if !cs.DisableMethods {
|
||||||
|
vs.strings = make([]string, len(values))
|
||||||
|
for i := range vs.values {
|
||||||
|
b := bytes.Buffer{}
|
||||||
|
if !handleMethods(cs, &b, vs.values[i]) {
|
||||||
|
vs.strings = nil
|
||||||
|
break
|
||||||
|
}
|
||||||
|
vs.strings[i] = b.String()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if vs.strings == nil && cs.SpewKeys {
|
||||||
|
vs.strings = make([]string, len(values))
|
||||||
|
for i := range vs.values {
|
||||||
|
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vs
|
||||||
|
}
|
||||||
|
|
||||||
|
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||||
|
// directly, or whether it should be considered for sorting by surrogate keys
|
||||||
|
// (if the ConfigState allows it).
|
||||||
|
func canSortSimply(kind reflect.Kind) bool {
|
||||||
|
// This switch parallels valueSortLess, except for the default case.
|
||||||
|
switch kind {
|
||||||
|
case reflect.Bool:
|
||||||
|
return true
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
return true
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
return true
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return true
|
||||||
|
case reflect.String:
|
||||||
|
return true
|
||||||
|
case reflect.Uintptr:
|
||||||
|
return true
|
||||||
|
case reflect.Array:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of values in the slice. It is part of the
|
||||||
|
// sort.Interface implementation.
|
||||||
|
func (s *valuesSorter) Len() int {
|
||||||
|
return len(s.values)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap swaps the values at the passed indices. It is part of the
|
||||||
|
// sort.Interface implementation.
|
||||||
|
func (s *valuesSorter) Swap(i, j int) {
|
||||||
|
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||||
|
if s.strings != nil {
|
||||||
|
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// valueSortLess returns whether the first value should sort before the second
|
||||||
|
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||||
|
// implementation.
|
||||||
|
func valueSortLess(a, b reflect.Value) bool {
|
||||||
|
switch a.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
return !a.Bool() && b.Bool()
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
return a.Int() < b.Int()
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
return a.Uint() < b.Uint()
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
return a.Float() < b.Float()
|
||||||
|
case reflect.String:
|
||||||
|
return a.String() < b.String()
|
||||||
|
case reflect.Uintptr:
|
||||||
|
return a.Uint() < b.Uint()
|
||||||
|
case reflect.Array:
|
||||||
|
// Compare the contents of both arrays.
|
||||||
|
l := a.Len()
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
av := a.Index(i)
|
||||||
|
bv := b.Index(i)
|
||||||
|
if av.Interface() == bv.Interface() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return valueSortLess(av, bv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return a.String() < b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Less returns whether the value at index i should sort before the
|
||||||
|
// value at index j. It is part of the sort.Interface implementation.
|
||||||
|
func (s *valuesSorter) Less(i, j int) bool {
|
||||||
|
if s.strings == nil {
|
||||||
|
return valueSortLess(s.values[i], s.values[j])
|
||||||
|
}
|
||||||
|
return s.strings[i] < s.strings[j]
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortValues is a sort function that handles both native types and any type that
|
||||||
|
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||||
|
// their Value.String() value to ensure display stability.
|
||||||
|
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||||
|
if len(values) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sort.Sort(newValuesSorter(values, cs))
|
||||||
|
}
|
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
|
@ -0,0 +1,306 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigState houses the configuration options used by spew to format and
|
||||||
|
// display values. There is a global instance, Config, that is used to control
|
||||||
|
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||||
|
// provides methods equivalent to the top-level functions.
|
||||||
|
//
|
||||||
|
// The zero value for ConfigState provides no indentation. You would typically
|
||||||
|
// want to set it to a space or a tab.
|
||||||
|
//
|
||||||
|
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||||
|
// with default settings. See the documentation of NewDefaultConfig for default
|
||||||
|
// values.
|
||||||
|
type ConfigState struct {
|
||||||
|
// Indent specifies the string to use for each indentation level. The
|
||||||
|
// global config instance that all top-level functions use set this to a
|
||||||
|
// single space by default. If you would like more indentation, you might
|
||||||
|
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||||
|
Indent string
|
||||||
|
|
||||||
|
// MaxDepth controls the maximum number of levels to descend into nested
|
||||||
|
// data structures. The default, 0, means there is no limit.
|
||||||
|
//
|
||||||
|
// NOTE: Circular data structures are properly detected, so it is not
|
||||||
|
// necessary to set this value unless you specifically want to limit deeply
|
||||||
|
// nested data structures.
|
||||||
|
MaxDepth int
|
||||||
|
|
||||||
|
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||||
|
// invoked for types that implement them.
|
||||||
|
DisableMethods bool
|
||||||
|
|
||||||
|
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||||
|
// error and Stringer interfaces on types which only accept a pointer
|
||||||
|
// receiver when the current type is not a pointer.
|
||||||
|
//
|
||||||
|
// NOTE: This might be an unsafe action since calling one of these methods
|
||||||
|
// with a pointer receiver could technically mutate the value, however,
|
||||||
|
// in practice, types which choose to satisify an error or Stringer
|
||||||
|
// interface with a pointer receiver should not be mutating their state
|
||||||
|
// inside these interface methods. As a result, this option relies on
|
||||||
|
// access to the unsafe package, so it will not have any effect when
|
||||||
|
// running in environments without access to the unsafe package such as
|
||||||
|
// Google App Engine or with the "safe" build tag specified.
|
||||||
|
DisablePointerMethods bool
|
||||||
|
|
||||||
|
// DisablePointerAddresses specifies whether to disable the printing of
|
||||||
|
// pointer addresses. This is useful when diffing data structures in tests.
|
||||||
|
DisablePointerAddresses bool
|
||||||
|
|
||||||
|
// DisableCapacities specifies whether to disable the printing of capacities
|
||||||
|
// for arrays, slices, maps and channels. This is useful when diffing
|
||||||
|
// data structures in tests.
|
||||||
|
DisableCapacities bool
|
||||||
|
|
||||||
|
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||||
|
// a custom error or Stringer interface is invoked. The default, false,
|
||||||
|
// means it will print the results of invoking the custom error or Stringer
|
||||||
|
// interface and return immediately instead of continuing to recurse into
|
||||||
|
// the internals of the data type.
|
||||||
|
//
|
||||||
|
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||||
|
// via the DisableMethods or DisablePointerMethods options.
|
||||||
|
ContinueOnMethod bool
|
||||||
|
|
||||||
|
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||||
|
// this to have a more deterministic, diffable output. Note that only
|
||||||
|
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||||
|
// that support the error or Stringer interfaces (if methods are
|
||||||
|
// enabled) are supported, with other types sorted according to the
|
||||||
|
// reflect.Value.String() output which guarantees display stability.
|
||||||
|
SortKeys bool
|
||||||
|
|
||||||
|
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||||
|
// be spewed to strings and sorted by those strings. This is only
|
||||||
|
// considered if SortKeys is true.
|
||||||
|
SpewKeys bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config is the active configuration of the top-level functions.
|
||||||
|
// The configuration can be changed by modifying the contents of spew.Config.
|
||||||
|
var Config = ConfigState{Indent: " "}
|
||||||
|
|
||||||
|
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the formatted string as a value that satisfies error. See NewFormatter
|
||||||
|
// for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||||
|
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Print(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Printf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Println(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||||
|
return fmt.Sprint(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||||
|
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||||
|
// the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||||
|
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||||
|
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||||
|
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||||
|
return fmt.Sprintln(c.convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||||
|
interface. As a result, it integrates cleanly with standard fmt package
|
||||||
|
printing functions. The formatter is useful for inline printing of smaller data
|
||||||
|
types similar to the standard %v format specifier.
|
||||||
|
|
||||||
|
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||||
|
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||||
|
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||||
|
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||||
|
the width and precision arguments (however they will still work on the format
|
||||||
|
specifiers not handled by the custom formatter).
|
||||||
|
|
||||||
|
Typically this function shouldn't be called directly. It is much easier to make
|
||||||
|
use of the custom formatter by calling one of the convenience functions such as
|
||||||
|
c.Printf, c.Println, or c.Printf.
|
||||||
|
*/
|
||||||
|
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||||
|
return newFormatter(c, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||||
|
// exactly the same as Dump.
|
||||||
|
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||||
|
fdump(c, w, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Dump displays the passed parameters to standard out with newlines, customizable
|
||||||
|
indentation, and additional debug information such as complete types and all
|
||||||
|
pointer addresses used to indirect to the final value. It provides the
|
||||||
|
following features over the built-in printing facilities provided by the fmt
|
||||||
|
package:
|
||||||
|
|
||||||
|
* Pointers are dereferenced and followed
|
||||||
|
* Circular data structures are detected and handled properly
|
||||||
|
* Custom Stringer/error interfaces are optionally invoked, including
|
||||||
|
on unexported types
|
||||||
|
* Custom types which only implement the Stringer/error interfaces via
|
||||||
|
a pointer receiver are optionally invoked when passing non-pointer
|
||||||
|
variables
|
||||||
|
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||||
|
includes offsets, byte values in hex, and ASCII output
|
||||||
|
|
||||||
|
The configuration options are controlled by modifying the public members
|
||||||
|
of c. See ConfigState for options documentation.
|
||||||
|
|
||||||
|
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||||
|
get the formatted result as a string.
|
||||||
|
*/
|
||||||
|
func (c *ConfigState) Dump(a ...interface{}) {
|
||||||
|
fdump(c, os.Stdout, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||||
|
// as Dump.
|
||||||
|
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fdump(c, &buf, a...)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||||
|
// length with each argument converted to a spew Formatter interface using
|
||||||
|
// the ConfigState associated with s.
|
||||||
|
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||||
|
formatters = make([]interface{}, len(args))
|
||||||
|
for index, arg := range args {
|
||||||
|
formatters[index] = newFormatter(c, arg)
|
||||||
|
}
|
||||||
|
return formatters
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||||
|
//
|
||||||
|
// Indent: " "
|
||||||
|
// MaxDepth: 0
|
||||||
|
// DisableMethods: false
|
||||||
|
// DisablePointerMethods: false
|
||||||
|
// ContinueOnMethod: false
|
||||||
|
// SortKeys: false
|
||||||
|
func NewDefaultConfig() *ConfigState {
|
||||||
|
return &ConfigState{Indent: " "}
|
||||||
|
}
|
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,211 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||||
|
debugging.
|
||||||
|
|
||||||
|
A quick overview of the additional features spew provides over the built-in
|
||||||
|
printing facilities for Go data types are as follows:
|
||||||
|
|
||||||
|
* Pointers are dereferenced and followed
|
||||||
|
* Circular data structures are detected and handled properly
|
||||||
|
* Custom Stringer/error interfaces are optionally invoked, including
|
||||||
|
on unexported types
|
||||||
|
* Custom types which only implement the Stringer/error interfaces via
|
||||||
|
a pointer receiver are optionally invoked when passing non-pointer
|
||||||
|
variables
|
||||||
|
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||||
|
includes offsets, byte values in hex, and ASCII output (only when using
|
||||||
|
Dump style)
|
||||||
|
|
||||||
|
There are two different approaches spew allows for dumping Go data structures:
|
||||||
|
|
||||||
|
* Dump style which prints with newlines, customizable indentation,
|
||||||
|
and additional debug information such as types and all pointer addresses
|
||||||
|
used to indirect to the final value
|
||||||
|
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||||
|
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||||
|
similar to the default %v while providing the additional functionality
|
||||||
|
outlined above and passing unsupported format verbs such as %x and %q
|
||||||
|
along to fmt
|
||||||
|
|
||||||
|
Quick Start
|
||||||
|
|
||||||
|
This section demonstrates how to quickly get started with spew. See the
|
||||||
|
sections below for further details on formatting and configuration options.
|
||||||
|
|
||||||
|
To dump a variable with full newlines, indentation, type, and pointer
|
||||||
|
information use Dump, Fdump, or Sdump:
|
||||||
|
spew.Dump(myVar1, myVar2, ...)
|
||||||
|
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||||
|
str := spew.Sdump(myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||||
|
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||||
|
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||||
|
%#+v (adds types and pointer addresses):
|
||||||
|
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
|
||||||
|
Configuration Options
|
||||||
|
|
||||||
|
Configuration of spew is handled by fields in the ConfigState type. For
|
||||||
|
convenience, all of the top-level functions use a global state available
|
||||||
|
via the spew.Config global.
|
||||||
|
|
||||||
|
It is also possible to create a ConfigState instance that provides methods
|
||||||
|
equivalent to the top-level functions. This allows concurrent configuration
|
||||||
|
options. See the ConfigState documentation for more details.
|
||||||
|
|
||||||
|
The following configuration options are available:
|
||||||
|
* Indent
|
||||||
|
String to use for each indentation level for Dump functions.
|
||||||
|
It is a single space by default. A popular alternative is "\t".
|
||||||
|
|
||||||
|
* MaxDepth
|
||||||
|
Maximum number of levels to descend into nested data structures.
|
||||||
|
There is no limit by default.
|
||||||
|
|
||||||
|
* DisableMethods
|
||||||
|
Disables invocation of error and Stringer interface methods.
|
||||||
|
Method invocation is enabled by default.
|
||||||
|
|
||||||
|
* DisablePointerMethods
|
||||||
|
Disables invocation of error and Stringer interface methods on types
|
||||||
|
which only accept pointer receivers from non-pointer variables.
|
||||||
|
Pointer method invocation is enabled by default.
|
||||||
|
|
||||||
|
* DisablePointerAddresses
|
||||||
|
DisablePointerAddresses specifies whether to disable the printing of
|
||||||
|
pointer addresses. This is useful when diffing data structures in tests.
|
||||||
|
|
||||||
|
* DisableCapacities
|
||||||
|
DisableCapacities specifies whether to disable the printing of
|
||||||
|
capacities for arrays, slices, maps and channels. This is useful when
|
||||||
|
diffing data structures in tests.
|
||||||
|
|
||||||
|
* ContinueOnMethod
|
||||||
|
Enables recursion into types after invoking error and Stringer interface
|
||||||
|
methods. Recursion after method invocation is disabled by default.
|
||||||
|
|
||||||
|
* SortKeys
|
||||||
|
Specifies map keys should be sorted before being printed. Use
|
||||||
|
this to have a more deterministic, diffable output. Note that
|
||||||
|
only native types (bool, int, uint, floats, uintptr and string)
|
||||||
|
and types which implement error or Stringer interfaces are
|
||||||
|
supported with other types sorted according to the
|
||||||
|
reflect.Value.String() output which guarantees display
|
||||||
|
stability. Natural map order is used by default.
|
||||||
|
|
||||||
|
* SpewKeys
|
||||||
|
Specifies that, as a last resort attempt, map keys should be
|
||||||
|
spewed to strings and sorted by those strings. This is only
|
||||||
|
considered if SortKeys is true.
|
||||||
|
|
||||||
|
Dump Usage
|
||||||
|
|
||||||
|
Simply call spew.Dump with a list of variables you want to dump:
|
||||||
|
|
||||||
|
spew.Dump(myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||||
|
io.Writer. For example, to dump to standard error:
|
||||||
|
|
||||||
|
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||||
|
|
||||||
|
str := spew.Sdump(myVar1, myVar2, ...)
|
||||||
|
|
||||||
|
Sample Dump Output
|
||||||
|
|
||||||
|
See the Dump example for details on the setup of the types and variables being
|
||||||
|
shown here.
|
||||||
|
|
||||||
|
(main.Foo) {
|
||||||
|
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||||
|
flag: (main.Flag) flagTwo,
|
||||||
|
data: (uintptr) <nil>
|
||||||
|
}),
|
||||||
|
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||||
|
(string) (len=3) "one": (bool) true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||||
|
command as shown.
|
||||||
|
([]uint8) (len=32 cap=32) {
|
||||||
|
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||||
|
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||||
|
00000020 31 32 |12|
|
||||||
|
}
|
||||||
|
|
||||||
|
Custom Formatter
|
||||||
|
|
||||||
|
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||||
|
so that it integrates cleanly with standard fmt package printing functions. The
|
||||||
|
formatter is useful for inline printing of smaller data types similar to the
|
||||||
|
standard %v format specifier.
|
||||||
|
|
||||||
|
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||||
|
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||||
|
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||||
|
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||||
|
the width and precision arguments (however they will still work on the format
|
||||||
|
specifiers not handled by the custom formatter).
|
||||||
|
|
||||||
|
Custom Formatter Usage
|
||||||
|
|
||||||
|
The simplest way to make use of the spew custom formatter is to call one of the
|
||||||
|
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
||||||
|
functions have syntax you are most likely already familiar with:
|
||||||
|
|
||||||
|
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
spew.Println(myVar, myVar2)
|
||||||
|
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||||
|
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||||
|
|
||||||
|
See the Index for the full list convenience functions.
|
||||||
|
|
||||||
|
Sample Formatter Output
|
||||||
|
|
||||||
|
Double pointer to a uint8:
|
||||||
|
%v: <**>5
|
||||||
|
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||||
|
%#v: (**uint8)5
|
||||||
|
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||||
|
|
||||||
|
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||||
|
%v: <*>{1 <*><shown>}
|
||||||
|
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||||
|
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||||
|
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||||
|
|
||||||
|
See the Printf example for details on the setup of variables being shown
|
||||||
|
here.
|
||||||
|
|
||||||
|
Errors
|
||||||
|
|
||||||
|
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||||
|
detects them and handles them internally by printing the panic information
|
||||||
|
inline with the output. Since spew is intended to provide deep pretty printing
|
||||||
|
capabilities on structures, it intentionally does not return any errors.
|
||||||
|
*/
|
||||||
|
package spew
|
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
|
@ -0,0 +1,509 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||||
|
// convert cgo types to uint8 slices for hexdumping.
|
||||||
|
uint8Type = reflect.TypeOf(uint8(0))
|
||||||
|
|
||||||
|
// cCharRE is a regular expression that matches a cgo char.
|
||||||
|
// It is used to detect character arrays to hexdump them.
|
||||||
|
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||||
|
|
||||||
|
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||||
|
// char. It is used to detect unsigned character arrays to hexdump
|
||||||
|
// them.
|
||||||
|
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||||
|
|
||||||
|
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||||
|
// It is used to detect uint8_t arrays to hexdump them.
|
||||||
|
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||||
|
)
|
||||||
|
|
||||||
|
// dumpState contains information about the state of a dump operation.
|
||||||
|
type dumpState struct {
|
||||||
|
w io.Writer
|
||||||
|
depth int
|
||||||
|
pointers map[uintptr]int
|
||||||
|
ignoreNextType bool
|
||||||
|
ignoreNextIndent bool
|
||||||
|
cs *ConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// indent performs indentation according to the depth level and cs.Indent
|
||||||
|
// option.
|
||||||
|
func (d *dumpState) indent() {
|
||||||
|
if d.ignoreNextIndent {
|
||||||
|
d.ignoreNextIndent = false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||||
|
// This is useful for data types like structs, arrays, slices, and maps which
|
||||||
|
// can contain varying types packed inside an interface.
|
||||||
|
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||||
|
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||||
|
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||||
|
// Remove pointers at or below the current depth from map used to detect
|
||||||
|
// circular refs.
|
||||||
|
for k, depth := range d.pointers {
|
||||||
|
if depth >= d.depth {
|
||||||
|
delete(d.pointers, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep list of all dereferenced pointers to show later.
|
||||||
|
pointerChain := make([]uintptr, 0)
|
||||||
|
|
||||||
|
// Figure out how many levels of indirection there are by dereferencing
|
||||||
|
// pointers and unpacking interfaces down the chain while detecting circular
|
||||||
|
// references.
|
||||||
|
nilFound := false
|
||||||
|
cycleFound := false
|
||||||
|
indirects := 0
|
||||||
|
ve := v
|
||||||
|
for ve.Kind() == reflect.Ptr {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
indirects++
|
||||||
|
addr := ve.Pointer()
|
||||||
|
pointerChain = append(pointerChain, addr)
|
||||||
|
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||||
|
cycleFound = true
|
||||||
|
indirects--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
d.pointers[addr] = d.depth
|
||||||
|
|
||||||
|
ve = ve.Elem()
|
||||||
|
if ve.Kind() == reflect.Interface {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ve = ve.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display type information.
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||||
|
d.w.Write([]byte(ve.Type().String()))
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
|
||||||
|
// Display pointer information.
|
||||||
|
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
for i, addr := range pointerChain {
|
||||||
|
if i > 0 {
|
||||||
|
d.w.Write(pointerChainBytes)
|
||||||
|
}
|
||||||
|
printHexPtr(d.w, addr)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display dereferenced value.
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
switch {
|
||||||
|
case nilFound:
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
|
||||||
|
case cycleFound:
|
||||||
|
d.w.Write(circularBytes)
|
||||||
|
|
||||||
|
default:
|
||||||
|
d.ignoreNextType = true
|
||||||
|
d.dump(ve)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||||
|
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||||
|
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||||
|
// Determine whether this type should be hex dumped or not. Also,
|
||||||
|
// for types which should be hexdumped, try to use the underlying data
|
||||||
|
// first, then fall back to trying to convert them to a uint8 slice.
|
||||||
|
var buf []uint8
|
||||||
|
doConvert := false
|
||||||
|
doHexDump := false
|
||||||
|
numEntries := v.Len()
|
||||||
|
if numEntries > 0 {
|
||||||
|
vt := v.Index(0).Type()
|
||||||
|
vts := vt.String()
|
||||||
|
switch {
|
||||||
|
// C types that need to be converted.
|
||||||
|
case cCharRE.MatchString(vts):
|
||||||
|
fallthrough
|
||||||
|
case cUnsignedCharRE.MatchString(vts):
|
||||||
|
fallthrough
|
||||||
|
case cUint8tCharRE.MatchString(vts):
|
||||||
|
doConvert = true
|
||||||
|
|
||||||
|
// Try to use existing uint8 slices and fall back to converting
|
||||||
|
// and copying if that fails.
|
||||||
|
case vt.Kind() == reflect.Uint8:
|
||||||
|
// We need an addressable interface to convert the type
|
||||||
|
// to a byte slice. However, the reflect package won't
|
||||||
|
// give us an interface on certain things like
|
||||||
|
// unexported struct fields in order to enforce
|
||||||
|
// visibility rules. We use unsafe, when available, to
|
||||||
|
// bypass these restrictions since this package does not
|
||||||
|
// mutate the values.
|
||||||
|
vs := v
|
||||||
|
if !vs.CanInterface() || !vs.CanAddr() {
|
||||||
|
vs = unsafeReflectValue(vs)
|
||||||
|
}
|
||||||
|
if !UnsafeDisabled {
|
||||||
|
vs = vs.Slice(0, numEntries)
|
||||||
|
|
||||||
|
// Use the existing uint8 slice if it can be
|
||||||
|
// type asserted.
|
||||||
|
iface := vs.Interface()
|
||||||
|
if slice, ok := iface.([]uint8); ok {
|
||||||
|
buf = slice
|
||||||
|
doHexDump = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The underlying data needs to be converted if it can't
|
||||||
|
// be type asserted to a uint8 slice.
|
||||||
|
doConvert = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy and convert the underlying type if needed.
|
||||||
|
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||||
|
// Convert and copy each element into a uint8 byte
|
||||||
|
// slice.
|
||||||
|
buf = make([]uint8, numEntries)
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
vv := v.Index(i)
|
||||||
|
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||||
|
}
|
||||||
|
doHexDump = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hexdump the entire slice as needed.
|
||||||
|
if doHexDump {
|
||||||
|
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||||
|
str := indent + hex.Dump(buf)
|
||||||
|
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||||
|
str = strings.TrimRight(str, d.cs.Indent)
|
||||||
|
d.w.Write([]byte(str))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively call dump for each item.
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
d.dump(d.unpackValue(v.Index(i)))
|
||||||
|
if i < (numEntries - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||||
|
// value to figure out what kind of object we are dealing with and formats it
|
||||||
|
// appropriately. It is a recursive function, however circular data structures
|
||||||
|
// are detected and handled properly.
|
||||||
|
func (d *dumpState) dump(v reflect.Value) {
|
||||||
|
// Handle invalid reflect values immediately.
|
||||||
|
kind := v.Kind()
|
||||||
|
if kind == reflect.Invalid {
|
||||||
|
d.w.Write(invalidAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle pointers specially.
|
||||||
|
if kind == reflect.Ptr {
|
||||||
|
d.indent()
|
||||||
|
d.dumpPtr(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print type information unless already handled elsewhere.
|
||||||
|
if !d.ignoreNextType {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
d.w.Write([]byte(v.Type().String()))
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
d.ignoreNextType = false
|
||||||
|
|
||||||
|
// Display length and capacity if the built-in len and cap functions
|
||||||
|
// work with the value's kind and the len/cap itself is non-zero.
|
||||||
|
valueLen, valueCap := 0, 0
|
||||||
|
switch v.Kind() {
|
||||||
|
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||||
|
valueLen, valueCap = v.Len(), v.Cap()
|
||||||
|
case reflect.Map, reflect.String:
|
||||||
|
valueLen = v.Len()
|
||||||
|
}
|
||||||
|
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||||
|
d.w.Write(openParenBytes)
|
||||||
|
if valueLen != 0 {
|
||||||
|
d.w.Write(lenEqualsBytes)
|
||||||
|
printInt(d.w, int64(valueLen), 10)
|
||||||
|
}
|
||||||
|
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||||
|
if valueLen != 0 {
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
d.w.Write(capEqualsBytes)
|
||||||
|
printInt(d.w, int64(valueCap), 10)
|
||||||
|
}
|
||||||
|
d.w.Write(closeParenBytes)
|
||||||
|
d.w.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||||
|
// is enabled
|
||||||
|
if !d.cs.DisableMethods {
|
||||||
|
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||||
|
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case reflect.Invalid:
|
||||||
|
// Do nothing. We should never get here since invalid has already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
printBool(d.w, v.Bool())
|
||||||
|
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
printInt(d.w, v.Int(), 10)
|
||||||
|
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
printUint(d.w, v.Uint(), 10)
|
||||||
|
|
||||||
|
case reflect.Float32:
|
||||||
|
printFloat(d.w, v.Float(), 32)
|
||||||
|
|
||||||
|
case reflect.Float64:
|
||||||
|
printFloat(d.w, v.Float(), 64)
|
||||||
|
|
||||||
|
case reflect.Complex64:
|
||||||
|
printComplex(d.w, v.Complex(), 32)
|
||||||
|
|
||||||
|
case reflect.Complex128:
|
||||||
|
printComplex(d.w, v.Complex(), 64)
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case reflect.Array:
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.dumpSlice(v)
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
// The only time we should get here is for nil interfaces due to
|
||||||
|
// unpackValue calls.
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
// Do nothing. We should never get here since pointers have already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
// nil maps should be indicated as different than empty maps
|
||||||
|
if v.IsNil() {
|
||||||
|
d.w.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
numEntries := v.Len()
|
||||||
|
keys := v.MapKeys()
|
||||||
|
if d.cs.SortKeys {
|
||||||
|
sortValues(keys, d.cs)
|
||||||
|
}
|
||||||
|
for i, key := range keys {
|
||||||
|
d.dump(d.unpackValue(key))
|
||||||
|
d.w.Write(colonSpaceBytes)
|
||||||
|
d.ignoreNextIndent = true
|
||||||
|
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||||
|
if i < (numEntries - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
d.w.Write(openBraceNewlineBytes)
|
||||||
|
d.depth++
|
||||||
|
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(maxNewlineBytes)
|
||||||
|
} else {
|
||||||
|
vt := v.Type()
|
||||||
|
numFields := v.NumField()
|
||||||
|
for i := 0; i < numFields; i++ {
|
||||||
|
d.indent()
|
||||||
|
vtf := vt.Field(i)
|
||||||
|
d.w.Write([]byte(vtf.Name))
|
||||||
|
d.w.Write(colonSpaceBytes)
|
||||||
|
d.ignoreNextIndent = true
|
||||||
|
d.dump(d.unpackValue(v.Field(i)))
|
||||||
|
if i < (numFields - 1) {
|
||||||
|
d.w.Write(commaNewlineBytes)
|
||||||
|
} else {
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.depth--
|
||||||
|
d.indent()
|
||||||
|
d.w.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Uintptr:
|
||||||
|
printHexPtr(d.w, uintptr(v.Uint()))
|
||||||
|
|
||||||
|
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||||
|
printHexPtr(d.w, v.Pointer())
|
||||||
|
|
||||||
|
// There were not any other types at the time this code was written, but
|
||||||
|
// fall back to letting the default fmt package handle it in case any new
|
||||||
|
// types are added.
|
||||||
|
default:
|
||||||
|
if v.CanInterface() {
|
||||||
|
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(d.w, "%v", v.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fdump is a helper function to consolidate the logic from the various public
|
||||||
|
// methods which take varying writers and config states.
|
||||||
|
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||||
|
for _, arg := range a {
|
||||||
|
if arg == nil {
|
||||||
|
w.Write(interfaceBytes)
|
||||||
|
w.Write(spaceBytes)
|
||||||
|
w.Write(nilAngleBytes)
|
||||||
|
w.Write(newlineBytes)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
d := dumpState{w: w, cs: cs}
|
||||||
|
d.pointers = make(map[uintptr]int)
|
||||||
|
d.dump(reflect.ValueOf(arg))
|
||||||
|
d.w.Write(newlineBytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||||
|
// exactly the same as Dump.
|
||||||
|
func Fdump(w io.Writer, a ...interface{}) {
|
||||||
|
fdump(&Config, w, a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||||
|
// as Dump.
|
||||||
|
func Sdump(a ...interface{}) string {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fdump(&Config, &buf, a...)
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
Dump displays the passed parameters to standard out with newlines, customizable
|
||||||
|
indentation, and additional debug information such as complete types and all
|
||||||
|
pointer addresses used to indirect to the final value. It provides the
|
||||||
|
following features over the built-in printing facilities provided by the fmt
|
||||||
|
package:
|
||||||
|
|
||||||
|
* Pointers are dereferenced and followed
|
||||||
|
* Circular data structures are detected and handled properly
|
||||||
|
* Custom Stringer/error interfaces are optionally invoked, including
|
||||||
|
on unexported types
|
||||||
|
* Custom types which only implement the Stringer/error interfaces via
|
||||||
|
a pointer receiver are optionally invoked when passing non-pointer
|
||||||
|
variables
|
||||||
|
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||||
|
includes offsets, byte values in hex, and ASCII output
|
||||||
|
|
||||||
|
The configuration options are controlled by an exported package global,
|
||||||
|
spew.Config. See ConfigState for options documentation.
|
||||||
|
|
||||||
|
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||||
|
get the formatted result as a string.
|
||||||
|
*/
|
||||||
|
func Dump(a ...interface{}) {
|
||||||
|
fdump(&Config, os.Stdout, a...)
|
||||||
|
}
|
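As a quick illustration of the Dump, Sdump, and Fdump entry points defined above, here is a minimal sketch of how a caller might pretty-print a value; the `backup` struct and its field values are hypothetical and serve only as example input.

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

// backup is a hypothetical example type used only for this sketch.
type backup struct {
	Name string
	Age  int
}

func main() {
	b := &backup{Name: "db-2023-10-01.tar.gz", Age: 36}

	// Dump writes a deeply formatted representation to stdout,
	// including type information and pointer addresses.
	spew.Dump(b)

	// Sdump returns the same output as a string; Fdump writes to any io.Writer.
	s := spew.Sdump(b)
	spew.Fdump(os.Stderr, b)
	_ = s
}
```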
419 vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
@@ -0,0 +1,419 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||||
|
const supportedFlags = "0-+# "
|
||||||
|
|
||||||
|
// formatState implements the fmt.Formatter interface and contains information
|
||||||
|
// about the state of a formatting operation. The NewFormatter function can
|
||||||
|
// be used to get a new Formatter which can be used directly as arguments
|
||||||
|
// in standard fmt package printing calls.
|
||||||
|
type formatState struct {
|
||||||
|
value interface{}
|
||||||
|
fs fmt.State
|
||||||
|
depth int
|
||||||
|
pointers map[uintptr]int
|
||||||
|
ignoreNextType bool
|
||||||
|
cs *ConfigState
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildDefaultFormat recreates the original format string without precision
|
||||||
|
// and width information to pass in to fmt.Sprintf in the case of an
|
||||||
|
// unrecognized type. Unless new types are added to the language, this
|
||||||
|
// function won't ever be called.
|
||||||
|
func (f *formatState) buildDefaultFormat() (format string) {
|
||||||
|
buf := bytes.NewBuffer(percentBytes)
|
||||||
|
|
||||||
|
for _, flag := range supportedFlags {
|
||||||
|
if f.fs.Flag(int(flag)) {
|
||||||
|
buf.WriteRune(flag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteRune('v')
|
||||||
|
|
||||||
|
format = buf.String()
|
||||||
|
return format
|
||||||
|
}
|
||||||
|
|
||||||
|
// constructOrigFormat recreates the original format string including precision
|
||||||
|
// and width information to pass along to the standard fmt package. This allows
|
||||||
|
// automatic deferral of all format strings this package doesn't support.
|
||||||
|
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||||
|
buf := bytes.NewBuffer(percentBytes)
|
||||||
|
|
||||||
|
for _, flag := range supportedFlags {
|
||||||
|
if f.fs.Flag(int(flag)) {
|
||||||
|
buf.WriteRune(flag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if width, ok := f.fs.Width(); ok {
|
||||||
|
buf.WriteString(strconv.Itoa(width))
|
||||||
|
}
|
||||||
|
|
||||||
|
if precision, ok := f.fs.Precision(); ok {
|
||||||
|
buf.Write(precisionBytes)
|
||||||
|
buf.WriteString(strconv.Itoa(precision))
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteRune(verb)
|
||||||
|
|
||||||
|
format = buf.String()
|
||||||
|
return format
|
||||||
|
}
|
||||||
|
|
||||||
|
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||||
|
// ensures that types for values which have been unpacked from an interface
|
||||||
|
// are displayed when the show types flag is also set.
|
||||||
|
// This is useful for data types like structs, arrays, slices, and maps which
|
||||||
|
// can contain varying types packed inside an interface.
|
||||||
|
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||||
|
if v.Kind() == reflect.Interface {
|
||||||
|
f.ignoreNextType = false
|
||||||
|
if !v.IsNil() {
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||||
|
func (f *formatState) formatPtr(v reflect.Value) {
|
||||||
|
// Display nil if top level pointer is nil.
|
||||||
|
showTypes := f.fs.Flag('#')
|
||||||
|
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove pointers at or below the current depth from map used to detect
|
||||||
|
// circular refs.
|
||||||
|
for k, depth := range f.pointers {
|
||||||
|
if depth >= f.depth {
|
||||||
|
delete(f.pointers, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep list of all dereferenced pointers to possibly show later.
|
||||||
|
pointerChain := make([]uintptr, 0)
|
||||||
|
|
||||||
|
// Figure out how many levels of indirection there are by derferencing
|
||||||
|
// pointers and unpacking interfaces down the chain while detecting circular
|
||||||
|
// references.
|
||||||
|
nilFound := false
|
||||||
|
cycleFound := false
|
||||||
|
indirects := 0
|
||||||
|
ve := v
|
||||||
|
for ve.Kind() == reflect.Ptr {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
indirects++
|
||||||
|
addr := ve.Pointer()
|
||||||
|
pointerChain = append(pointerChain, addr)
|
||||||
|
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||||
|
cycleFound = true
|
||||||
|
indirects--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
f.pointers[addr] = f.depth
|
||||||
|
|
||||||
|
ve = ve.Elem()
|
||||||
|
if ve.Kind() == reflect.Interface {
|
||||||
|
if ve.IsNil() {
|
||||||
|
nilFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
ve = ve.Elem()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display type or indirection level depending on flags.
|
||||||
|
if showTypes && !f.ignoreNextType {
|
||||||
|
f.fs.Write(openParenBytes)
|
||||||
|
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||||
|
f.fs.Write([]byte(ve.Type().String()))
|
||||||
|
f.fs.Write(closeParenBytes)
|
||||||
|
} else {
|
||||||
|
if nilFound || cycleFound {
|
||||||
|
indirects += strings.Count(ve.Type().String(), "*")
|
||||||
|
}
|
||||||
|
f.fs.Write(openAngleBytes)
|
||||||
|
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||||
|
f.fs.Write(closeAngleBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display pointer information depending on flags.
|
||||||
|
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||||
|
f.fs.Write(openParenBytes)
|
||||||
|
for i, addr := range pointerChain {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(pointerChainBytes)
|
||||||
|
}
|
||||||
|
printHexPtr(f.fs, addr)
|
||||||
|
}
|
||||||
|
f.fs.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Display dereferenced value.
|
||||||
|
switch {
|
||||||
|
case nilFound:
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
|
||||||
|
case cycleFound:
|
||||||
|
f.fs.Write(circularShortBytes)
|
||||||
|
|
||||||
|
default:
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(ve)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// format is the main workhorse for providing the Formatter interface. It
|
||||||
|
// uses the passed reflect value to figure out what kind of object we are
|
||||||
|
// dealing with and formats it appropriately. It is a recursive function,
|
||||||
|
// however circular data structures are detected and handled properly.
|
||||||
|
func (f *formatState) format(v reflect.Value) {
|
||||||
|
// Handle invalid reflect values immediately.
|
||||||
|
kind := v.Kind()
|
||||||
|
if kind == reflect.Invalid {
|
||||||
|
f.fs.Write(invalidAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle pointers specially.
|
||||||
|
if kind == reflect.Ptr {
|
||||||
|
f.formatPtr(v)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print type information unless already handled elsewhere.
|
||||||
|
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||||
|
f.fs.Write(openParenBytes)
|
||||||
|
f.fs.Write([]byte(v.Type().String()))
|
||||||
|
f.fs.Write(closeParenBytes)
|
||||||
|
}
|
||||||
|
f.ignoreNextType = false
|
||||||
|
|
||||||
|
// Call Stringer/error interfaces if they exist and the handle methods
|
||||||
|
// flag is enabled.
|
||||||
|
if !f.cs.DisableMethods {
|
||||||
|
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||||
|
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch kind {
|
||||||
|
case reflect.Invalid:
|
||||||
|
// Do nothing. We should never get here since invalid has already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Bool:
|
||||||
|
printBool(f.fs, v.Bool())
|
||||||
|
|
||||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||||
|
printInt(f.fs, v.Int(), 10)
|
||||||
|
|
||||||
|
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||||
|
printUint(f.fs, v.Uint(), 10)
|
||||||
|
|
||||||
|
case reflect.Float32:
|
||||||
|
printFloat(f.fs, v.Float(), 32)
|
||||||
|
|
||||||
|
case reflect.Float64:
|
||||||
|
printFloat(f.fs, v.Float(), 64)
|
||||||
|
|
||||||
|
case reflect.Complex64:
|
||||||
|
printComplex(f.fs, v.Complex(), 32)
|
||||||
|
|
||||||
|
case reflect.Complex128:
|
||||||
|
printComplex(f.fs, v.Complex(), 64)
|
||||||
|
|
||||||
|
case reflect.Slice:
|
||||||
|
if v.IsNil() {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
case reflect.Array:
|
||||||
|
f.fs.Write(openBracketBytes)
|
||||||
|
f.depth++
|
||||||
|
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||||
|
f.fs.Write(maxShortBytes)
|
||||||
|
} else {
|
||||||
|
numEntries := v.Len()
|
||||||
|
for i := 0; i < numEntries; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(f.unpackValue(v.Index(i)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.depth--
|
||||||
|
f.fs.Write(closeBracketBytes)
|
||||||
|
|
||||||
|
case reflect.String:
|
||||||
|
f.fs.Write([]byte(v.String()))
|
||||||
|
|
||||||
|
case reflect.Interface:
|
||||||
|
// The only time we should get here is for nil interfaces due to
|
||||||
|
// unpackValue calls.
|
||||||
|
if v.IsNil() {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Ptr:
|
||||||
|
// Do nothing. We should never get here since pointers have already
|
||||||
|
// been handled above.
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
// nil maps should be indicated as different than empty maps
|
||||||
|
if v.IsNil() {
|
||||||
|
f.fs.Write(nilAngleBytes)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
f.fs.Write(openMapBytes)
|
||||||
|
f.depth++
|
||||||
|
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||||
|
f.fs.Write(maxShortBytes)
|
||||||
|
} else {
|
||||||
|
keys := v.MapKeys()
|
||||||
|
if f.cs.SortKeys {
|
||||||
|
sortValues(keys, f.cs)
|
||||||
|
}
|
||||||
|
for i, key := range keys {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(f.unpackValue(key))
|
||||||
|
f.fs.Write(colonBytes)
|
||||||
|
f.ignoreNextType = true
|
||||||
|
f.format(f.unpackValue(v.MapIndex(key)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.depth--
|
||||||
|
f.fs.Write(closeMapBytes)
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
numFields := v.NumField()
|
||||||
|
f.fs.Write(openBraceBytes)
|
||||||
|
f.depth++
|
||||||
|
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||||
|
f.fs.Write(maxShortBytes)
|
||||||
|
} else {
|
||||||
|
vt := v.Type()
|
||||||
|
for i := 0; i < numFields; i++ {
|
||||||
|
if i > 0 {
|
||||||
|
f.fs.Write(spaceBytes)
|
||||||
|
}
|
||||||
|
vtf := vt.Field(i)
|
||||||
|
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||||
|
f.fs.Write([]byte(vtf.Name))
|
||||||
|
f.fs.Write(colonBytes)
|
||||||
|
}
|
||||||
|
f.format(f.unpackValue(v.Field(i)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
f.depth--
|
||||||
|
f.fs.Write(closeBraceBytes)
|
||||||
|
|
||||||
|
case reflect.Uintptr:
|
||||||
|
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||||
|
|
||||||
|
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||||
|
printHexPtr(f.fs, v.Pointer())
|
||||||
|
|
||||||
|
// There were not any other types at the time this code was written, but
|
||||||
|
// fall back to letting the default fmt package handle it if any get added.
|
||||||
|
default:
|
||||||
|
format := f.buildDefaultFormat()
|
||||||
|
if v.CanInterface() {
|
||||||
|
fmt.Fprintf(f.fs, format, v.Interface())
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(f.fs, format, v.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||||
|
// details.
|
||||||
|
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||||
|
f.fs = fs
|
||||||
|
|
||||||
|
// Use standard formatting for verbs that are not v.
|
||||||
|
if verb != 'v' {
|
||||||
|
format := f.constructOrigFormat(verb)
|
||||||
|
fmt.Fprintf(fs, format, f.value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.value == nil {
|
||||||
|
if fs.Flag('#') {
|
||||||
|
fs.Write(interfaceBytes)
|
||||||
|
}
|
||||||
|
fs.Write(nilAngleBytes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
f.format(reflect.ValueOf(f.value))
|
||||||
|
}
|
||||||
|
|
||||||
|
// newFormatter is a helper function to consolidate the logic from the various
|
||||||
|
// public methods which take varying config states.
|
||||||
|
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||||
|
fs := &formatState{value: v, cs: cs}
|
||||||
|
fs.pointers = make(map[uintptr]int)
|
||||||
|
return fs
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||||
|
interface. As a result, it integrates cleanly with standard fmt package
|
||||||
|
printing functions. The formatter is useful for inline printing of smaller data
|
||||||
|
types similar to the standard %v format specifier.
|
||||||
|
|
||||||
|
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||||
|
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||||
|
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||||
|
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||||
|
the width and precision arguments (however they will still work on the format
|
||||||
|
specifiers not handled by the custom formatter).
|
||||||
|
|
||||||
|
Typically this function shouldn't be called directly. It is much easier to make
|
||||||
|
use of the custom formatter by calling one of the convenience functions such as
|
||||||
|
Printf, Println, or Fprintf.
|
||||||
|
*/
|
||||||
|
func NewFormatter(v interface{}) fmt.Formatter {
|
||||||
|
return newFormatter(&Config, v)
|
||||||
|
}
|
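A minimal sketch of how the formatter returned by NewFormatter above plugs into the standard fmt verbs; the `job` struct is a hypothetical example type.

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type job struct{ Name string }
	j := &job{Name: "abc2"}

	// The formatter honors %v, %+v (adds pointer addresses), %#v (adds types)
	// and %#+v; any other verb is passed through to the fmt package unchanged.
	fmt.Printf("%+v\n", spew.NewFormatter(j))
	fmt.Printf("%#v\n", spew.NewFormatter(j))
}
```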
148 vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
@@ -0,0 +1,148 @@
|
||||||
|
/*
|
||||||
|
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||||
|
*
|
||||||
|
* Permission to use, copy, modify, and distribute this software for any
|
||||||
|
* purpose with or without fee is hereby granted, provided that the above
|
||||||
|
* copyright notice and this permission notice appear in all copies.
|
||||||
|
*
|
||||||
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||||
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||||
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||||
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||||
|
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||||
|
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||||
|
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package spew
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the formatted string as a value that satisfies error. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Errorf(format string, a ...interface{}) (err error) {
|
||||||
|
return fmt.Errorf(format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprint(w, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Fprintln(w, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Print(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Print(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Printf(format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the number of bytes written and any write error encountered. See
|
||||||
|
// NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Println(a ...interface{}) (n int, err error) {
|
||||||
|
return fmt.Println(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Sprint(a ...interface{}) string {
|
||||||
|
return fmt.Sprint(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||||
|
// passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Sprintf(format string, a ...interface{}) string {
|
||||||
|
return fmt.Sprintf(format, convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||||
|
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||||
|
// returns the resulting string. See NewFormatter for formatting details.
|
||||||
|
//
|
||||||
|
// This function is shorthand for the following syntax:
|
||||||
|
//
|
||||||
|
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||||
|
func Sprintln(a ...interface{}) string {
|
||||||
|
return fmt.Sprintln(convertArgs(a)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||||
|
// length with each argument converted to a default spew Formatter interface.
|
||||||
|
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||||
|
formatters = make([]interface{}, len(args))
|
||||||
|
for index, arg := range args {
|
||||||
|
formatters[index] = NewFormatter(arg)
|
||||||
|
}
|
||||||
|
return formatters
|
||||||
|
}
|
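The wrappers above simply convert every argument with NewFormatter (via convertArgs) before delegating to fmt. A short usage sketch follows; the map literal is an arbitrary example value.

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	buckets := map[string]int{"hourly": 12, "daily": 7}

	// Each argument is wrapped via convertArgs/NewFormatter, so the
	// spew-specific %+v and %#v behavior works with these wrappers.
	spew.Printf("buckets: %+v\n", buckets)
	out := spew.Sprintf("%#v", buckets)
	spew.Println(out)
}
```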
27 vendor/github.com/kr/fs/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
|
||||||
|
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
3 vendor/github.com/kr/fs/Readme generated vendored Normal file
@@ -0,0 +1,3 @@
Filesystem Package

http://godoc.org/github.com/kr/fs
36 vendor/github.com/kr/fs/filesystem.go generated vendored Normal file
@@ -0,0 +1,36 @@
package fs

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// FileSystem defines the methods of an abstract filesystem.
type FileSystem interface {

	// ReadDir reads the directory named by dirname and returns a
	// list of directory entries.
	ReadDir(dirname string) ([]os.FileInfo, error)

	// Lstat returns a FileInfo describing the named file. If the file is a
	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
	// makes no attempt to follow the link.
	Lstat(name string) (os.FileInfo, error)

	// Join joins any number of path elements into a single path, adding a
	// separator if necessary. The result is Cleaned; in particular, all
	// empty strings are ignored.
	//
	// The separator is FileSystem specific.
	Join(elem ...string) string
}

// fs represents a FileSystem provided by the os package.
type fs struct{}

func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) }

func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }

func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) }
95 vendor/github.com/kr/fs/walk.go generated vendored Normal file
@@ -0,0 +1,95 @@
// Package fs provides filesystem-related functions.
package fs

import (
	"os"
)

// Walker provides a convenient interface for iterating over the
// descendants of a filesystem path.
// Successive calls to the Step method will step through each
// file or directory in the tree, including the root. The files
// are walked in lexical order, which makes the output deterministic
// but means that for very large directories Walker can be inefficient.
// Walker does not follow symbolic links.
type Walker struct {
	fs      FileSystem
	cur     item
	stack   []item
	descend bool
}

type item struct {
	path string
	info os.FileInfo
	err  error
}

// Walk returns a new Walker rooted at root.
func Walk(root string) *Walker {
	return WalkFS(root, new(fs))
}

// WalkFS returns a new Walker rooted at root on the FileSystem fs.
func WalkFS(root string, fs FileSystem) *Walker {
	info, err := fs.Lstat(root)
	return &Walker{
		fs:    fs,
		stack: []item{{root, info, err}},
	}
}

// Step advances the Walker to the next file or directory,
// which will then be available through the Path, Stat,
// and Err methods.
// It returns false when the walk stops at the end of the tree.
func (w *Walker) Step() bool {
	if w.descend && w.cur.err == nil && w.cur.info.IsDir() {
		list, err := w.fs.ReadDir(w.cur.path)
		if err != nil {
			w.cur.err = err
			w.stack = append(w.stack, w.cur)
		} else {
			for i := len(list) - 1; i >= 0; i-- {
				path := w.fs.Join(w.cur.path, list[i].Name())
				w.stack = append(w.stack, item{path, list[i], nil})
			}
		}
	}

	if len(w.stack) == 0 {
		return false
	}
	i := len(w.stack) - 1
	w.cur = w.stack[i]
	w.stack = w.stack[:i]
	w.descend = true
	return true
}

// Path returns the path to the most recent file or directory
// visited by a call to Step. It contains the argument to Walk
// as a prefix; that is, if Walk is called with "dir", which is
// a directory containing the file "a", Path will return "dir/a".
func (w *Walker) Path() string {
	return w.cur.path
}

// Stat returns info for the most recent file or directory
// visited by a call to Step.
func (w *Walker) Stat() os.FileInfo {
	return w.cur.info
}

// Err returns the error, if any, for the most recent attempt
// by Step to visit a file or directory. If a directory has
// an error, w will not descend into that directory.
func (w *Walker) Err() error {
	return w.cur.err
}

// SkipDir causes the currently visited directory to be skipped.
// If w is not on a directory, SkipDir has no effect.
func (w *Walker) SkipDir() {
	w.descend = false
}
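A small usage sketch for the Walker above; the root path "/tmp" is an arbitrary example.

```go
package main

import (
	"fmt"

	"github.com/kr/fs"
)

func main() {
	// Step advances through the tree in lexical order; Err reports any
	// error for the entry most recently visited.
	walker := fs.Walk("/tmp")
	for walker.Step() {
		if err := walker.Err(); err != nil {
			fmt.Println("skip:", err)
			continue
		}
		fmt.Println(walker.Path())
	}
}
```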
21 vendor/github.com/mattn/go-colorable/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
48 vendor/github.com/mattn/go-colorable/README.md generated vendored Normal file
@@ -0,0 +1,48 @@
|
||||||
|
# go-colorable
|
||||||
|
|
||||||
|
[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest)
|
||||||
|
[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable)
|
||||||
|
[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
|
||||||
|
|
||||||
|
Colorable writer for windows.
|
||||||
|
|
||||||
|
For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
|
||||||
|
This package is possible to handle escape sequence for ansi color on windows.
|
||||||
|
|
||||||
|
## Too Bad!
|
||||||
|
|
||||||
|
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
|
||||||
|
|
||||||
|
|
||||||
|
## So Good!
|
||||||
|
|
||||||
|
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```go
|
||||||
|
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
|
||||||
|
logrus.SetOutput(colorable.NewColorableStdout())
|
||||||
|
|
||||||
|
logrus.Info("succeeded")
|
||||||
|
logrus.Warn("not correct")
|
||||||
|
logrus.Error("something error")
|
||||||
|
logrus.Fatal("panic")
|
||||||
|
```
|
||||||
|
|
||||||
|
You can compile above code on non-windows OSs.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```
|
||||||
|
$ go get github.com/mattn/go-colorable
|
||||||
|
```
|
||||||
|
|
||||||
|
# License
|
||||||
|
|
||||||
|
MIT
|
||||||
|
|
||||||
|
# Author
|
||||||
|
|
||||||
|
Yasuhiro Matsumoto (a.k.a mattn)
|
38 vendor/github.com/mattn/go-colorable/colorable_appengine.go generated vendored Normal file
@@ -0,0 +1,38 @@
|
||||||
|
//go:build appengine
|
||||||
|
// +build appengine
|
||||||
|
|
||||||
|
package colorable
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-isatty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewColorable returns new instance of Writer which handles escape sequence.
|
||||||
|
func NewColorable(file *os.File) io.Writer {
|
||||||
|
if file == nil {
|
||||||
|
panic("nil passed instead of *os.File to NewColorable()")
|
||||||
|
}
|
||||||
|
|
||||||
|
return file
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
|
||||||
|
func NewColorableStdout() io.Writer {
|
||||||
|
return os.Stdout
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
|
||||||
|
func NewColorableStderr() io.Writer {
|
||||||
|
return os.Stderr
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableColorsStdout enable colors if possible.
|
||||||
|
func EnableColorsStdout(enabled *bool) func() {
|
||||||
|
if enabled != nil {
|
||||||
|
*enabled = true
|
||||||
|
}
|
||||||
|
return func() {}
|
||||||
|
}
|
38 vendor/github.com/mattn/go-colorable/colorable_others.go generated vendored Normal file
@@ -0,0 +1,38 @@
|
||||||
|
//go:build !windows && !appengine
|
||||||
|
// +build !windows,!appengine
|
||||||
|
|
||||||
|
package colorable
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-isatty"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewColorable returns new instance of Writer which handles escape sequence.
|
||||||
|
func NewColorable(file *os.File) io.Writer {
|
||||||
|
if file == nil {
|
||||||
|
panic("nil passed instead of *os.File to NewColorable()")
|
||||||
|
}
|
||||||
|
|
||||||
|
return file
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
|
||||||
|
func NewColorableStdout() io.Writer {
|
||||||
|
return os.Stdout
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
|
||||||
|
func NewColorableStderr() io.Writer {
|
||||||
|
return os.Stderr
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableColorsStdout enable colors if possible.
|
||||||
|
func EnableColorsStdout(enabled *bool) func() {
|
||||||
|
if enabled != nil {
|
||||||
|
*enabled = true
|
||||||
|
}
|
||||||
|
return func() {}
|
||||||
|
}
|
1047 vendor/github.com/mattn/go-colorable/colorable_windows.go generated vendored Normal file
File diff suppressed because it is too large
12 vendor/github.com/mattn/go-colorable/go.test.sh generated vendored Normal file
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    go test -race -coverprofile=profile.out -covermode=atomic "$d"
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
57 vendor/github.com/mattn/go-colorable/noncolorable.go generated vendored Normal file
@@ -0,0 +1,57 @@
|
||||||
|
package colorable
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NonColorable holds writer but removes escape sequence.
|
||||||
|
type NonColorable struct {
|
||||||
|
out io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNonColorable returns new instance of Writer which removes escape sequence from Writer.
|
||||||
|
func NewNonColorable(w io.Writer) io.Writer {
|
||||||
|
return &NonColorable{out: w}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes data on console
|
||||||
|
func (w *NonColorable) Write(data []byte) (n int, err error) {
|
||||||
|
er := bytes.NewReader(data)
|
||||||
|
var plaintext bytes.Buffer
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
c1, err := er.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
plaintext.WriteTo(w.out)
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
if c1 != 0x1b {
|
||||||
|
plaintext.WriteByte(c1)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
_, err = plaintext.WriteTo(w.out)
|
||||||
|
if err != nil {
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
c2, err := er.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
if c2 != 0x5b {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
c, err := er.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(data), nil
|
||||||
|
}
|
9 vendor/github.com/mattn/go-isatty/LICENSE generated vendored Normal file
@@ -0,0 +1,9 @@
|
||||||
|
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
|
||||||
|
|
||||||
|
MIT License (Expat)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
50 vendor/github.com/mattn/go-isatty/README.md generated vendored Normal file
@@ -0,0 +1,50 @@
|
||||||
|
# go-isatty
|
||||||
|
|
||||||
|
[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
|
||||||
|
[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty)
|
||||||
|
[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
|
||||||
|
|
||||||
|
isatty for golang
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/mattn/go-isatty"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||||
|
fmt.Println("Is Terminal")
|
||||||
|
} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
|
||||||
|
fmt.Println("Is Cygwin/MSYS2 Terminal")
|
||||||
|
} else {
|
||||||
|
fmt.Println("Is Not Terminal")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
|
||||||
|
```
|
||||||
|
$ go get github.com/mattn/go-isatty
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
MIT
|
||||||
|
|
||||||
|
## Author
|
||||||
|
|
||||||
|
Yasuhiro Matsumoto (a.k.a mattn)
|
||||||
|
|
||||||
|
## Thanks
|
||||||
|
|
||||||
|
* k-takata: base idea for IsCygwinTerminal
|
||||||
|
|
||||||
|
https://github.com/k-takata/go-iscygpty
|
2 vendor/github.com/mattn/go-isatty/doc.go generated vendored Normal file
@@ -0,0 +1,2 @@
// Package isatty implements interface to isatty
package isatty
12 vendor/github.com/mattn/go-isatty/go.test.sh generated vendored Normal file
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v vendor); do
    go test -race -coverprofile=profile.out -covermode=atomic "$d"
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
        rm profile.out
    fi
done
19 vendor/github.com/mattn/go-isatty/isatty_bsd.go generated vendored Normal file
@@ -0,0 +1,19 @@
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
// +build darwin freebsd openbsd netbsd dragonfly hurd
// +build !appengine

package isatty

import "golang.org/x/sys/unix"

// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
    _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
    return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
    return false
}
16 vendor/github.com/mattn/go-isatty/isatty_others.go generated vendored Normal file
@@ -0,0 +1,16 @@
//go:build appengine || js || nacl || wasm
// +build appengine js nacl wasm

package isatty

// IsTerminal returns true if the file descriptor is terminal which
// is always false on js and appengine classic which is a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
    return false
}

// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
    return false
}
23 vendor/github.com/mattn/go-isatty/isatty_plan9.go generated vendored Normal file
@@ -0,0 +1,23 @@
//go:build plan9
// +build plan9

package isatty

import (
    "syscall"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
    path, err := syscall.Fd2path(int(fd))
    if err != nil {
        return false
    }
    return path == "/dev/cons" || path == "/mnt/term/dev/cons"
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
    return false
}
21 vendor/github.com/mattn/go-isatty/isatty_solaris.go generated vendored Normal file
@@ -0,0 +1,21 @@
//go:build solaris && !appengine
// +build solaris,!appengine

package isatty

import (
    "golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c
func IsTerminal(fd uintptr) bool {
    _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA)
    return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
    return false
}
19 vendor/github.com/mattn/go-isatty/isatty_tcgets.go generated vendored Normal file
@@ -0,0 +1,19 @@
//go:build (linux || aix || zos) && !appengine
// +build linux aix zos
// +build !appengine

package isatty

import "golang.org/x/sys/unix"

// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
    _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
    return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
    return false
}
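Aside, not part of the vendored file: on Linux (and aix/zos) "is a terminal" reduces to "does the TCGETS ioctl succeed". A Linux-only sketch doing the same probe directly with golang.org/x/sys/unix:

```go
package main

import (
    "fmt"
    "os"

    "golang.org/x/sys/unix"
)

func main() {
    // Same probe as isatty_tcgets.go: TCGETS only succeeds on a tty.
    _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETS)
    fmt.Println("stdout is a terminal:", err == nil)
}
```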
125 vendor/github.com/mattn/go-isatty/isatty_windows.go generated vendored Normal file
@@ -0,0 +1,125 @@
//go:build windows && !appengine
// +build windows,!appengine

package isatty

import (
    "errors"
    "strings"
    "syscall"
    "unicode/utf16"
    "unsafe"
)

const (
    objectNameInfo uintptr = 1
    fileNameInfo           = 2
    fileTypePipe           = 3
)

var (
    kernel32                         = syscall.NewLazyDLL("kernel32.dll")
    ntdll                            = syscall.NewLazyDLL("ntdll.dll")
    procGetConsoleMode               = kernel32.NewProc("GetConsoleMode")
    procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
    procGetFileType                  = kernel32.NewProc("GetFileType")
    procNtQueryObject                = ntdll.NewProc("NtQueryObject")
)

func init() {
    // Check if GetFileInformationByHandleEx is available.
    if procGetFileInformationByHandleEx.Find() != nil {
        procGetFileInformationByHandleEx = nil
    }
}

// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
    var st uint32
    r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
    return r != 0 && e == 0
}

// Check pipe name is used for cygwin/msys2 pty.
// Cygwin/MSYS2 PTY has a name like:
//   \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
func isCygwinPipeName(name string) bool {
    token := strings.Split(name, "-")
    if len(token) < 5 {
        return false
    }

    if token[0] != `\msys` &&
        token[0] != `\cygwin` &&
        token[0] != `\Device\NamedPipe\msys` &&
        token[0] != `\Device\NamedPipe\cygwin` {
        return false
    }

    if token[1] == "" {
        return false
    }

    if !strings.HasPrefix(token[2], "pty") {
        return false
    }

    if token[3] != `from` && token[3] != `to` {
        return false
    }

    if token[4] != "master" {
        return false
    }

    return true
}

// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler
// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion
// guys are using Windows XP, this is a workaround for those guys, it will also work on system from
// Windows vista to 10
// see https://stackoverflow.com/a/18792477 for details
func getFileNameByHandle(fd uintptr) (string, error) {
    if procNtQueryObject == nil {
        return "", errors.New("ntdll.dll: NtQueryObject not supported")
    }

    var buf [4 + syscall.MAX_PATH]uint16
    var result int
    r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
        fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
    if r != 0 {
        return "", e
    }
    return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
}

// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal.
func IsCygwinTerminal(fd uintptr) bool {
    if procGetFileInformationByHandleEx == nil {
        name, err := getFileNameByHandle(fd)
        if err != nil {
            return false
        }
        return isCygwinPipeName(name)
    }

    // Cygwin/msys's pty is a pipe.
    ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
    if ft != fileTypePipe || e != 0 {
        return false
    }

    var buf [2 + syscall.MAX_PATH]uint16
    r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
        4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
        uintptr(len(buf)*2), 0, 0)
    if r == 0 || e != 0 {
        return false
    }

    l := *(*uint32)(unsafe.Pointer(&buf))
    return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
}
10 vendor/github.com/pkg/sftp/.gitignore generated vendored Normal file
@@ -0,0 +1,10 @@
.*.swo
.*.swp

server_standalone/server_standalone

examples/*/id_rsa
examples/*/id_rsa.pub

memprofile.out
memprofile.svg
3 vendor/github.com/pkg/sftp/CONTRIBUTORS generated vendored Normal file
@@ -0,0 +1,3 @@
Dave Cheney <dave@cheney.net>
Saulius Gurklys <s4uliu5@gmail.com>
John Eikenberry <jae@zhar.net>
9 vendor/github.com/pkg/sftp/LICENSE generated vendored Normal file
@@ -0,0 +1,9 @@
Copyright (c) 2013, Dave Cheney
All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 vendor/github.com/pkg/sftp/Makefile generated vendored Normal file
@@ -0,0 +1,27 @@
.PHONY: integration integration_w_race benchmark

integration:
    go test -integration -v ./...
    go test -testserver -v ./...
    go test -integration -testserver -v ./...
    go test -integration -allocator -v ./...
    go test -testserver -allocator -v ./...
    go test -integration -testserver -allocator -v ./...

integration_w_race:
    go test -race -integration -v ./...
    go test -race -testserver -v ./...
    go test -race -integration -testserver -v ./...
    go test -race -integration -allocator -v ./...
    go test -race -testserver -allocator -v ./...
    go test -race -integration -allocator -testserver -v ./...

COUNT ?= 1
BENCHMARK_PATTERN ?= "."

benchmark:
    go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT)

benchmark_w_memprofile:
    go test -integration -run=NONE -bench=$(BENCHMARK_PATTERN) -benchmem -count=$(COUNT) -memprofile memprofile.out
    go tool pprof -svg -output=memprofile.svg memprofile.out
44 vendor/github.com/pkg/sftp/README.md generated vendored Normal file
@@ -0,0 +1,44 @@
sftp
----

The `sftp` package provides support for file system operations on remote ssh
servers using the SFTP subsystem. It also implements an SFTP server for serving
files from the filesystem.

![CI Status](https://github.com/pkg/sftp/workflows/CI/badge.svg?branch=master&event=push) [![Go Reference](https://pkg.go.dev/badge/github.com/pkg/sftp.svg)](https://pkg.go.dev/github.com/pkg/sftp)

usage and examples
------------------

See [https://pkg.go.dev/github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) for
examples and usage.

The basic operation of the package mirrors the facilities of the
[os](http://golang.org/pkg/os) package.

The Walker interface for directory traversal is heavily inspired by Keith
Rarick's [fs](https://pkg.go.dev/github.com/kr/fs) package.

roadmap
-------

* There is way too much duplication in the Client methods. If there was an
  unmarshal(interface{}) method this would reduce a heap of the duplication.

contributing
------------

We welcome pull requests, bug fixes and issue reports.

Before proposing a large change, first please discuss your change by raising an
issue.

For API/code bugs, please include a small, self contained code example to
reproduce the issue. For pull requests, remember test coverage.

We try to handle issues and pull requests with a 0 open philosophy. That means
we will try to address the submission as soon as possible and will work toward
a resolution. If progress can no longer be made (eg. unreproducible bug) or
stops (eg. unresponsive submitter), we will close the bug.

Thanks.
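Aside, not part of the vendored README: the package is used "like os" over an existing SSH connection. A minimal client sketch; the host, user, password and path are placeholders, and ssh.InsecureIgnoreHostKey is for illustration only:

```go
package main

import (
    "log"

    "github.com/pkg/sftp"
    "golang.org/x/crypto/ssh"
)

func main() {
    config := &ssh.ClientConfig{
        User:            "backup",
        Auth:            []ssh.AuthMethod{ssh.Password("secret")},
        HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not do this in production
    }
    conn, err := ssh.Dial("tcp", "backups.example.com:22", config)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // Open an SFTP session on top of the SSH connection.
    client, err := sftp.NewClient(conn)
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // List remote files much like os: ReadDir returns []os.FileInfo.
    entries, err := client.ReadDir("/var/backups")
    if err != nil {
        log.Fatal(err)
    }
    for _, fi := range entries {
        log.Println(fi.Name(), fi.ModTime())
    }
}
```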
96 vendor/github.com/pkg/sftp/allocator.go generated vendored Normal file
@@ -0,0 +1,96 @@
package sftp

import (
    "sync"
)

type allocator struct {
    sync.Mutex
    available [][]byte
    // map key is the request order
    used map[uint32][][]byte
}

func newAllocator() *allocator {
    return &allocator{
        // micro optimization: initialize available pages with an initial capacity
        available: make([][]byte, 0, SftpServerWorkerCount*2),
        used:      make(map[uint32][][]byte),
    }
}

// GetPage returns a previously allocated and unused []byte or create a new one.
// The slice have a fixed size = maxMsgLength, this value is suitable for both
// receiving new packets and reading the files to serve
func (a *allocator) GetPage(requestOrderID uint32) []byte {
    a.Lock()
    defer a.Unlock()

    var result []byte

    // get an available page and remove it from the available ones.
    if len(a.available) > 0 {
        truncLength := len(a.available) - 1
        result = a.available[truncLength]

        a.available[truncLength] = nil          // clear out the internal pointer
        a.available = a.available[:truncLength] // truncate the slice
    }

    // no preallocated slice found, just allocate a new one
    if result == nil {
        result = make([]byte, maxMsgLength)
    }

    // put result in used pages
    a.used[requestOrderID] = append(a.used[requestOrderID], result)

    return result
}

// ReleasePages marks unused all pages in use for the given requestID
func (a *allocator) ReleasePages(requestOrderID uint32) {
    a.Lock()
    defer a.Unlock()

    if used := a.used[requestOrderID]; len(used) > 0 {
        a.available = append(a.available, used...)
    }
    delete(a.used, requestOrderID)
}

// Free removes all the used and available pages.
// Call this method when the allocator is not needed anymore
func (a *allocator) Free() {
    a.Lock()
    defer a.Unlock()

    a.available = nil
    a.used = make(map[uint32][][]byte)
}

func (a *allocator) countUsedPages() int {
    a.Lock()
    defer a.Unlock()

    num := 0
    for _, p := range a.used {
        num += len(p)
    }
    return num
}

func (a *allocator) countAvailablePages() int {
    a.Lock()
    defer a.Unlock()

    return len(a.available)
}

func (a *allocator) isRequestOrderIDUsed(requestOrderID uint32) bool {
    a.Lock()
    defer a.Unlock()

    _, ok := a.used[requestOrderID]
    return ok
}
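Aside, not part of the vendored file: the allocator recycles fixed-size pages keyed by request order, so a busy server reuses buffers instead of reallocating per packet. A stripped-down standalone sketch of the same idea (names and the page size are mine, and the locking of the real type is omitted):

```go
package poolsketch

// pagePool mirrors the recycling scheme above: pages move from "used"
// back to "available" when the request that held them is released.
type pagePool struct {
    pageSize  int
    available [][]byte
    used      map[uint32][][]byte
}

func newPagePool(pageSize int) *pagePool {
    return &pagePool{pageSize: pageSize, used: make(map[uint32][][]byte)}
}

func (p *pagePool) get(requestID uint32) []byte {
    var page []byte
    if n := len(p.available); n > 0 {
        page, p.available = p.available[n-1], p.available[:n-1]
    } else {
        page = make([]byte, p.pageSize)
    }
    p.used[requestID] = append(p.used[requestID], page)
    return page
}

func (p *pagePool) release(requestID uint32) {
    p.available = append(p.available, p.used[requestID]...)
    delete(p.used, requestID)
}
```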
121 vendor/github.com/pkg/sftp/attrs.go generated vendored Normal file
@@ -0,0 +1,121 @@
package sftp

// ssh_FXP_ATTRS support
// see https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5

import (
    "os"
    "time"
)

const (
    sshFileXferAttrSize        = 0x00000001
    sshFileXferAttrUIDGID      = 0x00000002
    sshFileXferAttrPermissions = 0x00000004
    sshFileXferAttrACmodTime   = 0x00000008
    sshFileXferAttrExtended    = 0x80000000

    sshFileXferAttrAll = sshFileXferAttrSize | sshFileXferAttrUIDGID | sshFileXferAttrPermissions |
        sshFileXferAttrACmodTime | sshFileXferAttrExtended
)

// fileInfo is an artificial type designed to satisfy os.FileInfo.
type fileInfo struct {
    name string
    stat *FileStat
}

// Name returns the base name of the file.
func (fi *fileInfo) Name() string { return fi.name }

// Size returns the length in bytes for regular files; system-dependent for others.
func (fi *fileInfo) Size() int64 { return int64(fi.stat.Size) }

// Mode returns file mode bits.
func (fi *fileInfo) Mode() os.FileMode { return toFileMode(fi.stat.Mode) }

// ModTime returns the last modification time of the file.
func (fi *fileInfo) ModTime() time.Time { return time.Unix(int64(fi.stat.Mtime), 0) }

// IsDir returns true if the file is a directory.
func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() }

func (fi *fileInfo) Sys() interface{} { return fi.stat }

// FileStat holds the original unmarshalled values from a call to READDIR or
// *STAT. It is exported for the purposes of accessing the raw values via
// os.FileInfo.Sys(). It is also used server side to store the unmarshalled
// values for SetStat.
type FileStat struct {
    Size     uint64
    Mode     uint32
    Mtime    uint32
    Atime    uint32
    UID      uint32
    GID      uint32
    Extended []StatExtended
}

// StatExtended contains additional, extended information for a FileStat.
type StatExtended struct {
    ExtType string
    ExtData string
}

func fileInfoFromStat(stat *FileStat, name string) os.FileInfo {
    return &fileInfo{
        name: name,
        stat: stat,
    }
}

// FileInfoUidGid extends os.FileInfo and adds callbacks for Uid and Gid retrieval,
// as an alternative to *syscall.Stat_t objects on unix systems.
type FileInfoUidGid interface {
    os.FileInfo
    Uid() uint32
    Gid() uint32
}

// FileInfoUidGid extends os.FileInfo and adds a callbacks for extended data retrieval.
type FileInfoExtendedData interface {
    os.FileInfo
    Extended() []StatExtended
}

func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) {
    mtime := fi.ModTime().Unix()
    atime := mtime
    var flags uint32 = sshFileXferAttrSize |
        sshFileXferAttrPermissions |
        sshFileXferAttrACmodTime

    fileStat := &FileStat{
        Size:  uint64(fi.Size()),
        Mode:  fromFileMode(fi.Mode()),
        Mtime: uint32(mtime),
        Atime: uint32(atime),
    }

    // os specific file stat decoding
    fileStatFromInfoOs(fi, &flags, fileStat)

    // The call above will include the sshFileXferAttrUIDGID in case
    // the os.FileInfo can be casted to *syscall.Stat_t on unix.
    // If fi implements FileInfoUidGid, retrieve Uid, Gid from it instead.
    if fiExt, ok := fi.(FileInfoUidGid); ok {
        flags |= sshFileXferAttrUIDGID
        fileStat.UID = fiExt.Uid()
        fileStat.GID = fiExt.Gid()
    }

    // if fi implements FileInfoExtendedData, retrieve extended data from it
    if fiExt, ok := fi.(FileInfoExtendedData); ok {
        fileStat.Extended = fiExt.Extended()
        if len(fileStat.Extended) > 0 {
            flags |= sshFileXferAttrExtended
        }
    }

    return flags, fileStat
}
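Aside, not part of the vendored file: the exported FileStat is what an os.FileInfo's Sys() returns for SFTP listings, which is how a caller reaches the raw uint32 timestamps. A small fragment as a sketch; it assumes `fmt`, `time` and `github.com/pkg/sftp` are imported, `client` is an already-connected *sftp.Client (see the client sketch after the README above), and the path is made up:

```go
// printRawAttrs prints the raw SFTP attributes behind an os.FileInfo.
func printRawAttrs(client *sftp.Client, path string) error {
    fi, err := client.Stat(path)
    if err != nil {
        return err
    }
    if st, ok := fi.Sys().(*sftp.FileStat); ok {
        // Mtime is a uint32 Unix timestamp in the wire format.
        fmt.Println(fi.Name(), st.Size, time.Unix(int64(st.Mtime), 0))
    }
    return nil
}
```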
12 vendor/github.com/pkg/sftp/attrs_stubs.go generated vendored Normal file
@@ -0,0 +1,12 @@
//go:build plan9 || windows || android
// +build plan9 windows android

package sftp

import (
    "os"
)

func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) {
    // todo
}
17 vendor/github.com/pkg/sftp/attrs_unix.go generated vendored Normal file
@@ -0,0 +1,17 @@
//go:build darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || aix || js
// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js

package sftp

import (
    "os"
    "syscall"
)

func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) {
    if statt, ok := fi.Sys().(*syscall.Stat_t); ok {
        *flags |= sshFileXferAttrUIDGID
        fileStat.UID = statt.Uid
        fileStat.GID = statt.Gid
    }
}
2030 vendor/github.com/pkg/sftp/client.go generated vendored Normal file
File diff suppressed because it is too large.
183 vendor/github.com/pkg/sftp/conn.go generated vendored Normal file
@@ -0,0 +1,183 @@
package sftp

import (
    "encoding"
    "fmt"
    "io"
    "sync"
)

// conn implements a bidirectional channel on which client and server
// connections are multiplexed.
type conn struct {
    io.Reader
    io.WriteCloser
    // this is the same allocator used in packet manager
    alloc      *allocator
    sync.Mutex // used to serialise writes to sendPacket
}

// the orderID is used in server mode if the allocator is enabled.
// For the client mode just pass 0.
// It returns io.EOF if the connection is closed and
// there are no more packets to read.
func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) {
    return recvPacket(c, c.alloc, orderID)
}

func (c *conn) sendPacket(m encoding.BinaryMarshaler) error {
    c.Lock()
    defer c.Unlock()

    return sendPacket(c, m)
}

func (c *conn) Close() error {
    c.Lock()
    defer c.Unlock()
    return c.WriteCloser.Close()
}

type clientConn struct {
    conn
    wg sync.WaitGroup

    sync.Mutex                          // protects inflight
    inflight   map[uint32]chan<- result // outstanding requests

    closed chan struct{}
    err    error
}

// Wait blocks until the conn has shut down, and return the error
// causing the shutdown. It can be called concurrently from multiple
// goroutines.
func (c *clientConn) Wait() error {
    <-c.closed
    return c.err
}

// Close closes the SFTP session.
func (c *clientConn) Close() error {
    defer c.wg.Wait()
    return c.conn.Close()
}

// recv continuously reads from the server and forwards responses to the
// appropriate channel.
func (c *clientConn) recv() error {
    defer c.conn.Close()

    for {
        typ, data, err := c.recvPacket(0)
        if err != nil {
            return err
        }
        sid, _, err := unmarshalUint32Safe(data)
        if err != nil {
            return err
        }

        ch, ok := c.getChannel(sid)
        if !ok {
            // This is an unexpected occurrence. Send the error
            // back to all listeners so that they terminate
            // gracefully.
            return fmt.Errorf("sid not found: %d", sid)
        }

        ch <- result{typ: typ, data: data}
    }
}

func (c *clientConn) putChannel(ch chan<- result, sid uint32) bool {
    c.Lock()
    defer c.Unlock()

    select {
    case <-c.closed:
        // already closed with broadcastErr, return error on chan.
        ch <- result{err: ErrSSHFxConnectionLost}
        return false
    default:
    }

    c.inflight[sid] = ch
    return true
}

func (c *clientConn) getChannel(sid uint32) (chan<- result, bool) {
    c.Lock()
    defer c.Unlock()

    ch, ok := c.inflight[sid]
    delete(c.inflight, sid)

    return ch, ok
}

// result captures the result of receiving the a packet from the server
type result struct {
    typ  byte
    data []byte
    err  error
}

type idmarshaler interface {
    id() uint32
    encoding.BinaryMarshaler
}

func (c *clientConn) sendPacket(ch chan result, p idmarshaler) (byte, []byte, error) {
    if cap(ch) < 1 {
        ch = make(chan result, 1)
    }

    c.dispatchRequest(ch, p)
    s := <-ch
    return s.typ, s.data, s.err
}

// dispatchRequest should ideally only be called by race-detection tests outside of this file,
// where you have to ensure two packets are in flight sequentially after each other.
func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) {
    sid := p.id()

    if !c.putChannel(ch, sid) {
        // already closed.
        return
    }

    if err := c.conn.sendPacket(p); err != nil {
        if ch, ok := c.getChannel(sid); ok {
            ch <- result{err: err}
        }
    }
}

// broadcastErr sends an error to all goroutines waiting for a response.
func (c *clientConn) broadcastErr(err error) {
    c.Lock()
    defer c.Unlock()

    bcastRes := result{err: ErrSSHFxConnectionLost}
    for sid, ch := range c.inflight {
        ch <- bcastRes

        // Replace the chan in inflight,
        // we have hijacked this chan,
        // and this guarantees always-only-once sending.
        c.inflight[sid] = make(chan<- result, 1)
    }

    c.err = err
    close(c.closed)
}

type serverConn struct {
    conn
}

func (s *serverConn) sendError(id uint32, err error) error {
    return s.sendPacket(statusFromError(id, err))
}
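Aside, not part of the vendored file: the client side multiplexes requests by parking a one-slot result channel under each request ID and routing responses from a single receive loop. A stripped-down standalone sketch of that dispatch idea (type and field names are mine, not pkg/sftp's):

```go
package muxsketch

import "sync"

// result and mux are illustrative names only.
type result struct {
    data []byte
    err  error
}

type mux struct {
    mu       sync.Mutex
    inflight map[uint32]chan<- result
}

// register parks a one-slot channel under the request ID before the packet is sent.
func (m *mux) register(id uint32, ch chan<- result) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.inflight[id] = ch
}

// deliver routes a response read by the single receive loop to its waiter.
func (m *mux) deliver(id uint32, r result) {
    m.mu.Lock()
    ch, ok := m.inflight[id]
    delete(m.inflight, id)
    m.mu.Unlock()
    if ok {
        ch <- r
    }
}
```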
10 vendor/github.com/pkg/sftp/debug.go generated vendored Normal file
@@ -0,0 +1,10 @@
//go:build debug
// +build debug

package sftp

import "log"

func debug(fmt string, args ...interface{}) {
    log.Printf(fmt, args...)
}
23 vendor/github.com/pkg/sftp/fuzz.go generated vendored Normal file
@@ -0,0 +1,23 @@
//go:build gofuzz
// +build gofuzz

package sftp

import "bytes"

type sinkfuzz struct{}

func (*sinkfuzz) Close() error                { return nil }
func (*sinkfuzz) Write(p []byte) (int, error) { return len(p), nil }

var devnull = &sinkfuzz{}

// To run: go-fuzz-build && go-fuzz
func Fuzz(data []byte) int {
    c, err := NewClientPipe(bytes.NewReader(data), devnull)
    if err != nil {
        return 0
    }
    c.Close()
    return 1
}
296 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go generated vendored Normal file
@@ -0,0 +1,296 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
// Attributes related flags.
|
||||||
|
const (
|
||||||
|
AttrSize = 1 << iota // SSH_FILEXFER_ATTR_SIZE
|
||||||
|
AttrUIDGID // SSH_FILEXFER_ATTR_UIDGID
|
||||||
|
AttrPermissions // SSH_FILEXFER_ATTR_PERMISSIONS
|
||||||
|
AttrACModTime // SSH_FILEXFER_ACMODTIME
|
||||||
|
|
||||||
|
AttrExtended = 1 << 31 // SSH_FILEXFER_ATTR_EXTENDED
|
||||||
|
)
|
||||||
|
|
||||||
|
// Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02
|
||||||
|
//
|
||||||
|
// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5
|
||||||
|
type Attributes struct {
|
||||||
|
Flags uint32
|
||||||
|
|
||||||
|
// AttrSize
|
||||||
|
Size uint64
|
||||||
|
|
||||||
|
// AttrUIDGID
|
||||||
|
UID uint32
|
||||||
|
GID uint32
|
||||||
|
|
||||||
|
// AttrPermissions
|
||||||
|
Permissions FileMode
|
||||||
|
|
||||||
|
// AttrACmodTime
|
||||||
|
ATime uint32
|
||||||
|
MTime uint32
|
||||||
|
|
||||||
|
// AttrExtended
|
||||||
|
ExtendedAttributes []ExtendedAttribute
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSize returns the Size field and a bool that is true if and only if the value is valid/defined.
|
||||||
|
func (a *Attributes) GetSize() (size uint64, ok bool) {
|
||||||
|
return a.Size, a.Flags&AttrSize != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSize is a convenience function that sets the Size field,
|
||||||
|
// and marks the field as valid/defined in Flags.
|
||||||
|
func (a *Attributes) SetSize(size uint64) {
|
||||||
|
a.Flags |= AttrSize
|
||||||
|
a.Size = size
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUIDGID returns the UID and GID fields and a bool that is true if and only if the values are valid/defined.
|
||||||
|
func (a *Attributes) GetUIDGID() (uid, gid uint32, ok bool) {
|
||||||
|
return a.UID, a.GID, a.Flags&AttrUIDGID != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUIDGID is a convenience function that sets the UID and GID fields,
|
||||||
|
// and marks the fields as valid/defined in Flags.
|
||||||
|
func (a *Attributes) SetUIDGID(uid, gid uint32) {
|
||||||
|
a.Flags |= AttrUIDGID
|
||||||
|
a.UID = uid
|
||||||
|
a.GID = gid
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPermissions returns the Permissions field and a bool that is true if and only if the value is valid/defined.
|
||||||
|
func (a *Attributes) GetPermissions() (perms FileMode, ok bool) {
|
||||||
|
return a.Permissions, a.Flags&AttrPermissions != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPermissions is a convenience function that sets the Permissions field,
|
||||||
|
// and marks the field as valid/defined in Flags.
|
||||||
|
func (a *Attributes) SetPermissions(perms FileMode) {
|
||||||
|
a.Flags |= AttrPermissions
|
||||||
|
a.Permissions = perms
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetACModTime returns the ATime and MTime fields and a bool that is true if and only if the values are valid/defined.
|
||||||
|
func (a *Attributes) GetACModTime() (atime, mtime uint32, ok bool) {
|
||||||
|
return a.ATime, a.MTime, a.Flags&AttrACModTime != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetACModTime is a convenience function that sets the ATime and MTime fields,
|
||||||
|
// and marks the fields as valid/defined in Flags.
|
||||||
|
func (a *Attributes) SetACModTime(atime, mtime uint32) {
|
||||||
|
a.Flags |= AttrACModTime
|
||||||
|
a.ATime = atime
|
||||||
|
a.MTime = mtime
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of bytes a would marshal into.
|
||||||
|
func (a *Attributes) Len() int {
|
||||||
|
length := 4
|
||||||
|
|
||||||
|
if a.Flags&AttrSize != 0 {
|
||||||
|
length += 8
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrUIDGID != 0 {
|
||||||
|
length += 4 + 4
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrPermissions != 0 {
|
||||||
|
length += 4
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrACModTime != 0 {
|
||||||
|
length += 4 + 4
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrExtended != 0 {
|
||||||
|
length += 4
|
||||||
|
|
||||||
|
for _, ext := range a.ExtendedAttributes {
|
||||||
|
length += ext.Len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return length
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalInto marshals e onto the end of the given Buffer.
|
||||||
|
func (a *Attributes) MarshalInto(buf *Buffer) {
|
||||||
|
buf.AppendUint32(a.Flags)
|
||||||
|
|
||||||
|
if a.Flags&AttrSize != 0 {
|
||||||
|
buf.AppendUint64(a.Size)
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrUIDGID != 0 {
|
||||||
|
buf.AppendUint32(a.UID)
|
||||||
|
buf.AppendUint32(a.GID)
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrPermissions != 0 {
|
||||||
|
buf.AppendUint32(uint32(a.Permissions))
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrACModTime != 0 {
|
||||||
|
buf.AppendUint32(a.ATime)
|
||||||
|
buf.AppendUint32(a.MTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrExtended != 0 {
|
||||||
|
buf.AppendUint32(uint32(len(a.ExtendedAttributes)))
|
||||||
|
|
||||||
|
for _, ext := range a.ExtendedAttributes {
|
||||||
|
ext.MarshalInto(buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns a as the binary encoding of a.
|
||||||
|
func (a *Attributes) MarshalBinary() ([]byte, error) {
|
||||||
|
buf := NewBuffer(make([]byte, 0, a.Len()))
|
||||||
|
a.MarshalInto(buf)
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalFrom unmarshals an Attributes from the given Buffer into e.
|
||||||
|
//
|
||||||
|
// NOTE: The values of fields not covered in the a.Flags are explicitly undefined.
|
||||||
|
func (a *Attributes) UnmarshalFrom(buf *Buffer) (err error) {
|
||||||
|
flags := buf.ConsumeUint32()
|
||||||
|
|
||||||
|
return a.XXX_UnmarshalByFlags(flags, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode.
|
||||||
|
// DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp.
|
||||||
|
// This function is not a part of any compatibility promise.
|
||||||
|
func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, buf *Buffer) (err error) {
|
||||||
|
a.Flags = flags
|
||||||
|
|
||||||
|
// Short-circuit dummy attributes.
|
||||||
|
if a.Flags == 0 {
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrSize != 0 {
|
||||||
|
a.Size = buf.ConsumeUint64()
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrUIDGID != 0 {
|
||||||
|
a.UID = buf.ConsumeUint32()
|
||||||
|
a.GID = buf.ConsumeUint32()
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrPermissions != 0 {
|
||||||
|
a.Permissions = FileMode(buf.ConsumeUint32())
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrACModTime != 0 {
|
||||||
|
a.ATime = buf.ConsumeUint32()
|
||||||
|
a.MTime = buf.ConsumeUint32()
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Flags&AttrExtended != 0 {
|
||||||
|
count := buf.ConsumeCount()
|
||||||
|
|
||||||
|
a.ExtendedAttributes = make([]ExtendedAttribute, count)
|
||||||
|
for i := range a.ExtendedAttributes {
|
||||||
|
a.ExtendedAttributes[i].UnmarshalFrom(buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary decodes the binary encoding of Attributes into e.
|
||||||
|
func (a *Attributes) UnmarshalBinary(data []byte) error {
|
||||||
|
return a.UnmarshalFrom(NewBuffer(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02
|
||||||
|
//
|
||||||
|
// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5
|
||||||
|
type ExtendedAttribute struct {
|
||||||
|
Type string
|
||||||
|
Data string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of bytes e would marshal into.
|
||||||
|
func (e *ExtendedAttribute) Len() int {
|
||||||
|
return 4 + len(e.Type) + 4 + len(e.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalInto marshals e onto the end of the given Buffer.
|
||||||
|
func (e *ExtendedAttribute) MarshalInto(buf *Buffer) {
|
||||||
|
buf.AppendString(e.Type)
|
||||||
|
buf.AppendString(e.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns e as the binary encoding of e.
|
||||||
|
func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) {
|
||||||
|
buf := NewBuffer(make([]byte, 0, e.Len()))
|
||||||
|
e.MarshalInto(buf)
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalFrom unmarshals an ExtendedAattribute from the given Buffer into e.
|
||||||
|
func (e *ExtendedAttribute) UnmarshalFrom(buf *Buffer) (err error) {
|
||||||
|
*e = ExtendedAttribute{
|
||||||
|
Type: buf.ConsumeString(),
|
||||||
|
Data: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e.
|
||||||
|
func (e *ExtendedAttribute) UnmarshalBinary(data []byte) error {
|
||||||
|
return e.UnmarshalFrom(NewBuffer(data))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameEntry implements the SSH_FXP_NAME repeated data type from draft-ietf-secsh-filexfer-02
|
||||||
|
//
|
||||||
|
// This type is incompatible with versions 4 or higher.
|
||||||
|
type NameEntry struct {
|
||||||
|
Filename string
|
||||||
|
Longname string
|
||||||
|
Attrs Attributes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of bytes e would marshal into.
|
||||||
|
func (e *NameEntry) Len() int {
|
||||||
|
return 4 + len(e.Filename) + 4 + len(e.Longname) + e.Attrs.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalInto marshals e onto the end of the given Buffer.
|
||||||
|
func (e *NameEntry) MarshalInto(buf *Buffer) {
|
||||||
|
buf.AppendString(e.Filename)
|
||||||
|
buf.AppendString(e.Longname)
|
||||||
|
|
||||||
|
e.Attrs.MarshalInto(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns e as the binary encoding of e.
|
||||||
|
func (e *NameEntry) MarshalBinary() ([]byte, error) {
|
||||||
|
buf := NewBuffer(make([]byte, 0, e.Len()))
|
||||||
|
e.MarshalInto(buf)
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalFrom unmarshals an NameEntry from the given Buffer into e.
|
||||||
|
//
|
||||||
|
// NOTE: The values of fields not covered in the a.Flags are explicitly undefined.
|
||||||
|
func (e *NameEntry) UnmarshalFrom(buf *Buffer) (err error) {
|
||||||
|
*e = NameEntry{
|
||||||
|
Filename: buf.ConsumeString(),
|
||||||
|
Longname: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.Attrs.UnmarshalFrom(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary decodes the binary encoding of NameEntry into e.
|
||||||
|
func (e *NameEntry) UnmarshalBinary(data []byte) error {
|
||||||
|
return e.UnmarshalFrom(NewBuffer(data))
|
||||||
|
}
|
340 vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go generated vendored Normal file
@@ -0,0 +1,340 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Various encoding errors.
|
||||||
|
var (
|
||||||
|
ErrShortPacket = errors.New("packet too short")
|
||||||
|
ErrLongPacket = errors.New("packet too long")
|
||||||
|
)
|
||||||
|
|
||||||
|
// Buffer wraps up the various encoding details of the SSH format.
|
||||||
|
//
|
||||||
|
// Data types are encoded as per section 4 from https://tools.ietf.org/html/draft-ietf-secsh-architecture-09#page-8
|
||||||
|
type Buffer struct {
|
||||||
|
b []byte
|
||||||
|
off int
|
||||||
|
Err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBuffer creates and initializes a new buffer using buf as its initial contents.
|
||||||
|
// The new buffer takes ownership of buf, and the caller should not use buf after this call.
|
||||||
|
//
|
||||||
|
// In most cases, new(Buffer) (or just declaring a Buffer variable) is sufficient to initialize a Buffer.
|
||||||
|
func NewBuffer(buf []byte) *Buffer {
|
||||||
|
return &Buffer{
|
||||||
|
b: buf,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMarshalBuffer creates a new Buffer ready to start marshaling a Packet into.
|
||||||
|
// It preallocates enough space for uint32(length), uint8(type), uint32(request-id) and size more bytes.
|
||||||
|
func NewMarshalBuffer(size int) *Buffer {
|
||||||
|
return NewBuffer(make([]byte, 4+1+4+size))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns a slice of length b.Len() holding the unconsumed bytes in the Buffer.
|
||||||
|
// The slice is valid for use only until the next buffer modification
|
||||||
|
// (that is, only until the next call to an Append or Consume method).
|
||||||
|
func (b *Buffer) Bytes() []byte {
|
||||||
|
return b.b[b.off:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of unconsumed bytes in the buffer.
|
||||||
|
func (b *Buffer) Len() int { return len(b.b) - b.off }
|
||||||
|
|
||||||
|
// Cap returns the capacity of the buffer’s underlying byte slice,
|
||||||
|
// that is, the total space allocated for the buffer’s data.
|
||||||
|
func (b *Buffer) Cap() int { return cap(b.b) }
|
||||||
|
|
||||||
|
// Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends.
|
||||||
|
func (b *Buffer) Reset() {
|
||||||
|
*b = Buffer{
|
||||||
|
b: b.b[:0],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartPacket resets and initializes the buffer to be ready to start marshaling a packet into.
|
||||||
|
// It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID.
|
||||||
|
func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) {
|
||||||
|
*b = Buffer{
|
||||||
|
b: append(b.b[:0], make([]byte, 4)...),
|
||||||
|
}
|
||||||
|
|
||||||
|
b.AppendUint8(uint8(packetType))
|
||||||
|
b.AppendUint32(requestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Packet finalizes the packet started from StartPacket.
|
||||||
|
// It is expected that this will end the ownership of the underlying byte-slice,
|
||||||
|
// and so the returned byte-slices may be reused the same as any other byte-slice,
|
||||||
|
// the caller should not use this buffer after this call.
|
||||||
|
//
|
||||||
|
// It writes the packet body length into the first four bytes of the buffer in network byte order (big endian).
|
||||||
|
// The packet body length is the length of this buffer less the 4-byte length itself, plus the length of payload.
|
||||||
|
//
|
||||||
|
// It is assumed that no Consume methods have been called on this buffer,
|
||||||
|
// and so it returns the whole underlying slice.
|
||||||
|
func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err error) {
|
||||||
|
b.PutLength(len(b.b) - 4 + len(payload))
|
||||||
|
|
||||||
|
return b.b, payload, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeUint8 consumes a single byte from the buffer.
|
||||||
|
// If the buffer does not have enough data, it will set Err to ErrShortPacket.
|
||||||
|
func (b *Buffer) ConsumeUint8() uint8 {
|
||||||
|
if b.Err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Len() < 1 {
|
||||||
|
b.off = len(b.b)
|
||||||
|
b.Err = ErrShortPacket
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var v uint8
|
||||||
|
v, b.off = b.b[b.off], b.off+1
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendUint8 appends a single byte into the buffer.
|
||||||
|
func (b *Buffer) AppendUint8(v uint8) {
|
||||||
|
b.b = append(b.b, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero.
|
||||||
|
// If the buffer does not have enough data, it will set Err to ErrShortPacket.
|
||||||
|
func (b *Buffer) ConsumeBool() bool {
|
||||||
|
return b.ConsumeUint8() != 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendBool appends a single bool into the buffer.
|
||||||
|
// It encodes it as a single byte, with false as 0, and true as 1.
|
||||||
|
func (b *Buffer) AppendBool(v bool) {
|
||||||
|
if v {
|
||||||
|
b.AppendUint8(1)
|
||||||
|
} else {
|
||||||
|
b.AppendUint8(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian).
|
||||||
|
// If the buffer does not have enough data, it will set Err to ErrShortPacket.
|
||||||
|
func (b *Buffer) ConsumeUint16() uint16 {
|
||||||
|
if b.Err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Len() < 2 {
|
||||||
|
b.off = len(b.b)
|
||||||
|
b.Err = ErrShortPacket
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
v := binary.BigEndian.Uint16(b.b[b.off:])
|
||||||
|
b.off += 2
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendUint16 appends single uint16 into the buffer, in network byte order (big-endian).
|
||||||
|
func (b *Buffer) AppendUint16(v uint16) {
|
||||||
|
b.b = append(b.b,
|
||||||
|
byte(v>>8),
|
||||||
|
byte(v>>0),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// unmarshalUint32 is used internally to read the packet length.
|
||||||
|
// It is unsafe, and so not exported.
|
||||||
|
// Even within this package, its use should be avoided.
|
||||||
|
func unmarshalUint32(b []byte) uint32 {
|
||||||
|
return binary.BigEndian.Uint32(b[:4])
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian).
|
||||||
|
// If the buffer does not have enough data, it will set Err to ErrShortPacket.
|
||||||
|
func (b *Buffer) ConsumeUint32() uint32 {
|
||||||
|
if b.Err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Len() < 4 {
|
||||||
|
b.off = len(b.b)
|
||||||
|
b.Err = ErrShortPacket
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
v := binary.BigEndian.Uint32(b.b[b.off:])
|
||||||
|
b.off += 4
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian).
|
||||||
|
func (b *Buffer) AppendUint32(v uint32) {
|
||||||
|
b.b = append(b.b,
|
||||||
|
byte(v>>24),
|
||||||
|
byte(v>>16),
|
||||||
|
byte(v>>8),
|
||||||
|
byte(v>>0),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeCount consumes a single uint32 count from the buffer, in network byte order (big-endian) as an int.
|
||||||
|
// If the buffer does not have enough data, it will set Err to ErrShortPacket.
|
||||||
|
func (b *Buffer) ConsumeCount() int {
|
||||||
|
return int(b.ConsumeUint32())
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendCount appends a single int length as a uint32 into the buffer, in network byte order (big-endian).
|
||||||
|
func (b *Buffer) AppendCount(v int) {
|
||||||
|
b.AppendUint32(uint32(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian).
|
||||||
|
// If the buffer does not have enough data, it will set Err to ErrShortPacket.
|
||||||
|
func (b *Buffer) ConsumeUint64() uint64 {
|
||||||
|
if b.Err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Len() < 8 {
|
||||||
|
b.off = len(b.b)
|
||||||
|
b.Err = ErrShortPacket
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
v := binary.BigEndian.Uint64(b.b[b.off:])
|
||||||
|
b.off += 8
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian).
|
||||||
|
func (b *Buffer) AppendUint64(v uint64) {
|
||||||
|
b.b = append(b.b,
|
||||||
|
byte(v>>56),
|
||||||
|
byte(v>>48),
|
||||||
|
byte(v>>40),
|
||||||
|
byte(v>>32),
|
||||||
|
byte(v>>24),
|
||||||
|
byte(v>>16),
|
||||||
|
byte(v>>8),
|
||||||
|
byte(v>>0),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement.
|
||||||
|
// If the buffer does not have enough data, it will set Err to ErrShortPacket.
|
||||||
|
func (b *Buffer) ConsumeInt64() int64 {
|
||||||
|
return int64(b.ConsumeUint64())
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement.
|
||||||
|
func (b *Buffer) AppendInt64(v int64) {
|
||||||
|
b.AppendUint64(uint64(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeByteSlice consumes a single string of raw binary data from the buffer.
|
||||||
|
// A string is a uint32 length, followed by that number of raw bytes.
|
||||||
|
// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket.
|
||||||
|
//
|
||||||
|
// The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused
|
||||||
|
// (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary).
|
||||||
|
//
|
||||||
|
// In no case will any Consume calls return overlapping slice aliases,
|
||||||
|
// and Append calls are guaranteed to not disturb this slice alias.
|
||||||
|
func (b *Buffer) ConsumeByteSlice() []byte {
|
||||||
|
length := int(b.ConsumeUint32())
|
||||||
|
if b.Err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if b.Len() < length || length < 0 {
|
||||||
|
b.off = len(b.b)
|
||||||
|
b.Err = ErrShortPacket
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
v := b.b[b.off:]
|
||||||
|
if len(v) > length || cap(v) > length {
|
||||||
|
v = v[:length:length]
|
||||||
|
}
|
||||||
|
b.off += int(length)
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeByteSliceCopy consumes a single string of raw binary data as a copy from the buffer.
|
||||||
|
// A string is a uint32 length, followed by that number of raw bytes.
|
||||||
|
// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket.
|
||||||
|
//
|
||||||
|
// The returned slice does not alias any buffer contents,
|
||||||
|
// and will therefore be valid even if the buffer is later reused.
|
||||||
|
//
|
||||||
|
// If hint has sufficient capacity to hold the data, it will be reused and overwritten,
|
||||||
|
// otherwise a new backing slice will be allocated and returned.
|
||||||
|
func (b *Buffer) ConsumeByteSliceCopy(hint []byte) []byte {
|
||||||
|
data := b.ConsumeByteSlice()
|
||||||
|
|
||||||
|
if grow := len(data) - len(hint); grow > 0 {
|
||||||
|
hint = append(hint, make([]byte, grow)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := copy(hint, data)
|
||||||
|
hint = hint[:n]
|
||||||
|
return hint
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendByteSlice appends a single string of raw binary data into the buffer.
|
||||||
|
// A string is a uint32 length, followed by that number of raw bytes.
|
||||||
|
func (b *Buffer) AppendByteSlice(v []byte) {
|
||||||
|
b.AppendUint32(uint32(len(v)))
|
||||||
|
b.b = append(b.b, v...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConsumeString consumes a single string of binary data from the buffer.
|
||||||
|
// A string is a uint32 length, followed by that number of raw bytes.
|
||||||
|
// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket.
|
||||||
|
//
|
||||||
|
// NOTE: Go implicitly assumes that strings contain UTF-8 encoded data.
|
||||||
|
// All caveats on using arbitrary binary data in Go strings applies.
|
||||||
|
func (b *Buffer) ConsumeString() string {
|
||||||
|
return string(b.ConsumeByteSlice())
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendString appends a single string of binary data into the buffer.
|
||||||
|
// A string is a uint32 length, followed by that number of raw bytes.
|
||||||
|
func (b *Buffer) AppendString(v string) {
|
||||||
|
b.AppendByteSlice([]byte(v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PutLength writes the given size into the first four bytes of the buffer in network byte order (big endian).
|
||||||
|
func (b *Buffer) PutLength(size int) {
|
||||||
|
if len(b.b) < 4 {
|
||||||
|
b.b = append(b.b, make([]byte, 4-len(b.b))...)
|
||||||
|
}
|
||||||
|
|
||||||
|
binary.BigEndian.PutUint32(b.b, uint32(size))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns a clone of the full internal buffer.
|
||||||
|
func (b *Buffer) MarshalBinary() ([]byte, error) {
|
||||||
|
clone := make([]byte, len(b.b))
|
||||||
|
n := copy(clone, b.b)
|
||||||
|
return clone[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary sets the internal buffer of b to be a clone of data, and zeros the internal offset.
|
||||||
|
func (b *Buffer) UnmarshalBinary(data []byte) error {
|
||||||
|
if grow := len(data) - len(b.b); grow > 0 {
|
||||||
|
b.b = append(b.b, make([]byte, grow)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := copy(b.b, data)
|
||||||
|
b.b = b.b[:n]
|
||||||
|
b.off = 0
|
||||||
|
return nil
|
||||||
|
}
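// Illustrative usage sketch (not part of the vendored source): appending a few
// fields with a Buffer and consuming them back in the same order. The field
// values below are arbitrary.
//
//	enc := NewBuffer(nil)
//	enc.AppendUint64(1 << 40)
//	enc.AppendString("/tmp/example")
//
//	dec := NewBuffer(enc.Bytes())
//	offset := dec.ConsumeUint64()
//	path := dec.ConsumeString()
//	if dec.Err != nil {
//		// short packet: not enough bytes for the requested fields
//	}
//	_, _ = offset, path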
|
143
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go
generated
vendored
Normal file
|
@ -0,0 +1,143 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ExtendedData aliases the untyped interface composition of encoding.BinaryMarshaler and encoding.BinaryUnmarshaler.
|
||||||
|
type ExtendedData = interface {
|
||||||
|
encoding.BinaryMarshaler
|
||||||
|
encoding.BinaryUnmarshaler
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtendedDataConstructor defines a function that returns a newly allocated ExtendedData value for an arbitrary extended packet.
|
||||||
|
type ExtendedDataConstructor func() ExtendedData
|
||||||
|
|
||||||
|
var extendedPacketTypes = struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
constructors map[string]ExtendedDataConstructor
|
||||||
|
}{
|
||||||
|
constructors: make(map[string]ExtendedDataConstructor),
|
||||||
|
}
|
||||||
|
|
||||||
|
// RegisterExtendedPacketType defines a specific ExtendedDataConstructor for the given extension string.
|
||||||
|
func RegisterExtendedPacketType(extension string, constructor ExtendedDataConstructor) {
|
||||||
|
extendedPacketTypes.mu.Lock()
|
||||||
|
defer extendedPacketTypes.mu.Unlock()
|
||||||
|
|
||||||
|
if _, exist := extendedPacketTypes.constructors[extension]; exist {
|
||||||
|
panic("encoding/ssh/filexfer: multiple registration of extended packet type " + extension)
|
||||||
|
}
|
||||||
|
|
||||||
|
extendedPacketTypes.constructors[extension] = constructor
|
||||||
|
}
|
||||||
|
|
||||||
|
func newExtendedPacket(extension string) ExtendedData {
|
||||||
|
extendedPacketTypes.mu.RLock()
|
||||||
|
defer extendedPacketTypes.mu.RUnlock()
|
||||||
|
|
||||||
|
if f := extendedPacketTypes.constructors[extension]; f != nil {
|
||||||
|
return f()
|
||||||
|
}
|
||||||
|
|
||||||
|
return new(Buffer)
|
||||||
|
}
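// Illustrative usage sketch (not part of the vendored source): a caller can
// register a constructor so that ExtendedPacket.UnmarshalPacketBody decodes
// the request-specific-data into a concrete type. The extension name and the
// statVFSData type below are hypothetical.
//
//	type statVFSData struct{ Path string }
//
//	func (d *statVFSData) MarshalBinary() ([]byte, error) {
//		buf := NewBuffer(nil)
//		buf.AppendString(d.Path)
//		return buf.Bytes(), nil
//	}
//
//	func (d *statVFSData) UnmarshalBinary(data []byte) error {
//		buf := NewBuffer(data)
//		d.Path = buf.ConsumeString()
//		return buf.Err
//	}
//
//	func init() {
//		RegisterExtendedPacketType("statvfs@openssh.com", func() ExtendedData {
//			return new(statVFSData)
//		})
//	}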
|
||||||
|
|
||||||
|
// ExtendedPacket defines the SSH_FXP_EXTENDED packet.
|
||||||
|
type ExtendedPacket struct {
|
||||||
|
ExtendedRequest string
|
||||||
|
|
||||||
|
Data ExtendedData
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *ExtendedPacket) Type() PacketType {
|
||||||
|
return PacketTypeExtended
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
//
|
||||||
|
// The Data is marshaled into binary, and returned as the payload.
|
||||||
|
func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
size := 4 + len(p.ExtendedRequest) // string(extended-request)
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeExtended, reqid)
|
||||||
|
buf.AppendString(p.ExtendedRequest)
|
||||||
|
|
||||||
|
if p.Data != nil {
|
||||||
|
payload, err = p.Data.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
//
|
||||||
|
// If p.Data is nil, and the extension has been registered, a new type will be made from the registration.
|
||||||
|
// If the extension has not been registered, then a new Buffer will be allocated.
|
||||||
|
// Then the request-specific-data will be unmarshaled from the rest of the buffer.
|
||||||
|
func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
p.ExtendedRequest = buf.ConsumeString()
|
||||||
|
if buf.Err != nil {
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
if p.Data == nil {
|
||||||
|
p.Data = newExtendedPacket(p.ExtendedRequest)
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Data.UnmarshalBinary(buf.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExtendedReplyPacket defines the SSH_FXP_EXTENDED_REPLY packet.
|
||||||
|
type ExtendedReplyPacket struct {
|
||||||
|
Data ExtendedData
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *ExtendedReplyPacket) Type() PacketType {
|
||||||
|
return PacketTypeExtendedReply
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
//
|
||||||
|
// The Data is marshaled into binary, and returned as the payload.
|
||||||
|
func (p *ExtendedReplyPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
buf = NewMarshalBuffer(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeExtendedReply, reqid)
|
||||||
|
|
||||||
|
if p.Data != nil {
|
||||||
|
payload, err = p.Data.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
//
|
||||||
|
// If p.Data is nil, and there is request-specific-data,
|
||||||
|
// then the request-specific-data will be wrapped in a Buffer and assigned to p.Data.
|
||||||
|
func (p *ExtendedReplyPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
if p.Data == nil {
|
||||||
|
p.Data = new(Buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Data.UnmarshalBinary(buf.Bytes())
|
||||||
|
}
|
43
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go
generated
vendored
Normal file
|
@ -0,0 +1,43 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
// ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13.
|
||||||
|
// This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions.
|
||||||
|
//
|
||||||
|
// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-4.2
|
||||||
|
type ExtensionPair struct {
|
||||||
|
Name string
|
||||||
|
Data string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of bytes e would marshal into.
|
||||||
|
func (e *ExtensionPair) Len() int {
|
||||||
|
return 4 + len(e.Name) + 4 + len(e.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalInto marshals e onto the end of the given Buffer.
|
||||||
|
func (e *ExtensionPair) MarshalInto(buf *Buffer) {
|
||||||
|
buf.AppendString(e.Name)
|
||||||
|
buf.AppendString(e.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns e as the binary encoding of e.
|
||||||
|
func (e *ExtensionPair) MarshalBinary() ([]byte, error) {
|
||||||
|
buf := NewBuffer(make([]byte, 0, e.Len()))
|
||||||
|
e.MarshalInto(buf)
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e.
|
||||||
|
func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) {
|
||||||
|
*e = ExtensionPair{
|
||||||
|
Name: buf.ConsumeString(),
|
||||||
|
Data: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary decodes the binary encoding of ExtensionPair into e.
|
||||||
|
func (e *ExtensionPair) UnmarshalBinary(data []byte) error {
|
||||||
|
return e.UnmarshalFrom(NewBuffer(data))
|
||||||
|
}
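// Illustrative usage sketch (not part of the vendored source): round-tripping
// an ExtensionPair through its binary encoding. The extension name below is
// only an example value.
//
//	pair := ExtensionPair{Name: "example@example.com", Data: "1"}
//	raw, _ := pair.MarshalBinary()
//
//	var decoded ExtensionPair
//	if err := decoded.UnmarshalBinary(raw); err != nil {
//		// handle err
//	}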
|
54
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
// Package sshfx implements the wire encoding for secsh-filexfer as described in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
// PacketMarshaller narrowly defines packets that will only be transmitted.
|
||||||
|
//
|
||||||
|
// ExtendedPacket types will often only implement this interface,
|
||||||
|
// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field.
|
||||||
|
type PacketMarshaller interface {
|
||||||
|
// MarshalPacket is the primary intended way to encode a packet.
|
||||||
|
// The request-id for the packet is set from reqid.
|
||||||
|
//
|
||||||
|
// An optional buffer may be given in b.
|
||||||
|
// If the buffer has a minimum capacity, it shall be truncated and used to marshal the header into.
|
||||||
|
// The minimum capacity for the packet must be a constant expression, and should be at least 9.
|
||||||
|
//
|
||||||
|
// It shall return the main body of the encoded packet in header,
|
||||||
|
// and may optionally return an additional payload to be written immediately after the header.
|
||||||
|
//
|
||||||
|
// It shall encode in the first 4-bytes of the header the proper length of the rest of the header+payload.
|
||||||
|
MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Packet defines the behavior of a full generic SFTP packet.
|
||||||
|
//
|
||||||
|
// InitPacket, and VersionPacket are not generic SFTP packets, and instead implement (Un)MarshalBinary.
|
||||||
|
//
|
||||||
|
// ExtendedPacket types should not implement this interface,
|
||||||
|
// since decoding the whole packet body of an ExtendedPacket can only be done dependent on the ExtendedRequest field.
|
||||||
|
type Packet interface {
|
||||||
|
PacketMarshaller
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with the specific packet.
|
||||||
|
Type() PacketType
|
||||||
|
|
||||||
|
// UnmarshalPacketBody decodes a packet body from the given Buffer.
|
||||||
|
// It is assumed that the common header values of the length, type and request-id have already been consumed.
|
||||||
|
//
|
||||||
|
// Implementations should not alias the given Buffer,
|
||||||
|
// instead they can consider prepopulating an internal buffer as a hint,
|
||||||
|
// and copying into that buffer if it has sufficient length.
|
||||||
|
UnmarshalPacketBody(buf *Buffer) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComposePacket converts the return values of MarshalPacket into the equivalent return values of MarshalBinary.
|
||||||
|
func ComposePacket(header, payload []byte, err error) ([]byte, error) {
|
||||||
|
return append(header, payload...), err
|
||||||
|
}
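// Illustrative usage sketch (not part of the vendored source): ComposePacket
// turns the (header, payload, err) results of a MarshalPacket call into the
// single-slice form of MarshalBinary. The request id below is arbitrary.
//
//	p := &ClosePacket{Handle: "handle-1"}
//	wire, err := ComposePacket(p.MarshalPacket(42, nil))
//	if err != nil {
//		// handle err
//	}
//	_ = wire // length-prefixed SSH_FXP_CLOSE bytes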
|
||||||
|
|
||||||
|
// Default length values,
|
||||||
|
// Defined in draft-ietf-secsh-filexfer-02 section 3.
|
||||||
|
const (
|
||||||
|
DefaultMaxPacketLength = 34000
|
||||||
|
DefaultMaxDataLength = 32768
|
||||||
|
)
|
147
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go
generated
vendored
Normal file
|
@ -0,0 +1,147 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Status defines the SFTP error codes used in SSH_FXP_STATUS response packets.
|
||||||
|
type Status uint32
|
||||||
|
|
||||||
|
// Defines the various SSH_FX_* values.
|
||||||
|
const (
|
||||||
|
// see draft-ietf-secsh-filexfer-02
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-7
|
||||||
|
StatusOK = Status(iota)
|
||||||
|
StatusEOF
|
||||||
|
StatusNoSuchFile
|
||||||
|
StatusPermissionDenied
|
||||||
|
StatusFailure
|
||||||
|
StatusBadMessage
|
||||||
|
StatusNoConnection
|
||||||
|
StatusConnectionLost
|
||||||
|
StatusOPUnsupported
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-03.txt#section-7
|
||||||
|
StatusV4InvalidHandle
|
||||||
|
StatusV4NoSuchPath
|
||||||
|
StatusV4FileAlreadyExists
|
||||||
|
StatusV4WriteProtect
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-04.txt#section-7
|
||||||
|
StatusV4NoMedia
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-05.txt#section-7
|
||||||
|
StatusV5NoSpaceOnFilesystem
|
||||||
|
StatusV5QuotaExceeded
|
||||||
|
StatusV5UnknownPrincipal
|
||||||
|
StatusV5LockConflict
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-06.txt#section-8
|
||||||
|
StatusV6DirNotEmpty
|
||||||
|
StatusV6NotADirectory
|
||||||
|
StatusV6InvalidFilename
|
||||||
|
StatusV6LinkLoop
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-8
|
||||||
|
StatusV6CannotDelete
|
||||||
|
StatusV6InvalidParameter
|
||||||
|
StatusV6FileIsADirectory
|
||||||
|
StatusV6ByteRangeLockConflict
|
||||||
|
StatusV6ByteRangeLockRefused
|
||||||
|
StatusV6DeletePending
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-8.1
|
||||||
|
StatusV6FileCorrupt
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-10.txt#section-9.1
|
||||||
|
StatusV6OwnerInvalid
|
||||||
|
StatusV6GroupInvalid
|
||||||
|
|
||||||
|
// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1
|
||||||
|
StatusV6NoMatchingByteRangeLock
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s Status) Error() string {
|
||||||
|
return s.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is returns true if the target is the same Status code,
|
||||||
|
// or target is a StatusPacket with the same Status code.
|
||||||
|
func (s Status) Is(target error) bool {
|
||||||
|
if target, ok := target.(*StatusPacket); ok {
|
||||||
|
return target.StatusCode == s
|
||||||
|
}
|
||||||
|
|
||||||
|
return s == target
|
||||||
|
}
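// Illustrative usage sketch (not part of the vendored source): because Status
// implements error and Is, callers can match a status returned by the server
// with errors.Is from the standard library. The err value below is assumed to
// be (or wrap) a Status or *StatusPacket.
//
//	if errors.Is(err, StatusNoSuchFile) {
//		// the server reported SSH_FX_NO_SUCH_FILE
//	}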
|
||||||
|
|
||||||
|
func (s Status) String() string {
|
||||||
|
switch s {
|
||||||
|
case StatusOK:
|
||||||
|
return "SSH_FX_OK"
|
||||||
|
case StatusEOF:
|
||||||
|
return "SSH_FX_EOF"
|
||||||
|
case StatusNoSuchFile:
|
||||||
|
return "SSH_FX_NO_SUCH_FILE"
|
||||||
|
case StatusPermissionDenied:
|
||||||
|
return "SSH_FX_PERMISSION_DENIED"
|
||||||
|
case StatusFailure:
|
||||||
|
return "SSH_FX_FAILURE"
|
||||||
|
case StatusBadMessage:
|
||||||
|
return "SSH_FX_BAD_MESSAGE"
|
||||||
|
case StatusNoConnection:
|
||||||
|
return "SSH_FX_NO_CONNECTION"
|
||||||
|
case StatusConnectionLost:
|
||||||
|
return "SSH_FX_CONNECTION_LOST"
|
||||||
|
case StatusOPUnsupported:
|
||||||
|
return "SSH_FX_OP_UNSUPPORTED"
|
||||||
|
case StatusV4InvalidHandle:
|
||||||
|
return "SSH_FX_INVALID_HANDLE"
|
||||||
|
case StatusV4NoSuchPath:
|
||||||
|
return "SSH_FX_NO_SUCH_PATH"
|
||||||
|
case StatusV4FileAlreadyExists:
|
||||||
|
return "SSH_FX_FILE_ALREADY_EXISTS"
|
||||||
|
case StatusV4WriteProtect:
|
||||||
|
return "SSH_FX_WRITE_PROTECT"
|
||||||
|
case StatusV4NoMedia:
|
||||||
|
return "SSH_FX_NO_MEDIA"
|
||||||
|
case StatusV5NoSpaceOnFilesystem:
|
||||||
|
return "SSH_FX_NO_SPACE_ON_FILESYSTEM"
|
||||||
|
case StatusV5QuotaExceeded:
|
||||||
|
return "SSH_FX_QUOTA_EXCEEDED"
|
||||||
|
case StatusV5UnknownPrincipal:
|
||||||
|
return "SSH_FX_UNKNOWN_PRINCIPAL"
|
||||||
|
case StatusV5LockConflict:
|
||||||
|
return "SSH_FX_LOCK_CONFLICT"
|
||||||
|
case StatusV6DirNotEmpty:
|
||||||
|
return "SSH_FX_DIR_NOT_EMPTY"
|
||||||
|
case StatusV6NotADirectory:
|
||||||
|
return "SSH_FX_NOT_A_DIRECTORY"
|
||||||
|
case StatusV6InvalidFilename:
|
||||||
|
return "SSH_FX_INVALID_FILENAME"
|
||||||
|
case StatusV6LinkLoop:
|
||||||
|
return "SSH_FX_LINK_LOOP"
|
||||||
|
case StatusV6CannotDelete:
|
||||||
|
return "SSH_FX_CANNOT_DELETE"
|
||||||
|
case StatusV6InvalidParameter:
|
||||||
|
return "SSH_FX_INVALID_PARAMETER"
|
||||||
|
case StatusV6FileIsADirectory:
|
||||||
|
return "SSH_FX_FILE_IS_A_DIRECTORY"
|
||||||
|
case StatusV6ByteRangeLockConflict:
|
||||||
|
return "SSH_FX_BYTE_RANGE_LOCK_CONFLICT"
|
||||||
|
case StatusV6ByteRangeLockRefused:
|
||||||
|
return "SSH_FX_BYTE_RANGE_LOCK_REFUSED"
|
||||||
|
case StatusV6DeletePending:
|
||||||
|
return "SSH_FX_DELETE_PENDING"
|
||||||
|
case StatusV6FileCorrupt:
|
||||||
|
return "SSH_FX_FILE_CORRUPT"
|
||||||
|
case StatusV6OwnerInvalid:
|
||||||
|
return "SSH_FX_OWNER_INVALID"
|
||||||
|
case StatusV6GroupInvalid:
|
||||||
|
return "SSH_FX_GROUP_INVALID"
|
||||||
|
case StatusV6NoMatchingByteRangeLock:
|
||||||
|
return "SSH_FX_NO_MATCHING_BYTE_RANGE_LOCK"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("SSH_FX_UNKNOWN(%d)", s)
|
||||||
|
}
|
||||||
|
}
|
169
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go
generated
vendored
Normal file
|
@ -0,0 +1,169 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PacketType defines the various SFTP packet types.
|
||||||
|
type PacketType uint8
|
||||||
|
|
||||||
|
// Request packet types.
|
||||||
|
const (
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3
|
||||||
|
PacketTypeInit = PacketType(iota + 1)
|
||||||
|
PacketTypeVersion
|
||||||
|
PacketTypeOpen
|
||||||
|
PacketTypeClose
|
||||||
|
PacketTypeRead
|
||||||
|
PacketTypeWrite
|
||||||
|
PacketTypeLStat
|
||||||
|
PacketTypeFStat
|
||||||
|
PacketTypeSetstat
|
||||||
|
PacketTypeFSetstat
|
||||||
|
PacketTypeOpenDir
|
||||||
|
PacketTypeReadDir
|
||||||
|
PacketTypeRemove
|
||||||
|
PacketTypeMkdir
|
||||||
|
PacketTypeRmdir
|
||||||
|
PacketTypeRealPath
|
||||||
|
PacketTypeStat
|
||||||
|
PacketTypeRename
|
||||||
|
PacketTypeReadLink
|
||||||
|
PacketTypeSymlink
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-3.3
|
||||||
|
PacketTypeV6Link
|
||||||
|
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-3.3
|
||||||
|
PacketTypeV6Block
|
||||||
|
PacketTypeV6Unblock
|
||||||
|
)
|
||||||
|
|
||||||
|
// Response packet types.
|
||||||
|
const (
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3
|
||||||
|
PacketTypeStatus = PacketType(iota + 101)
|
||||||
|
PacketTypeHandle
|
||||||
|
PacketTypeData
|
||||||
|
PacketTypeName
|
||||||
|
PacketTypeAttrs
|
||||||
|
)
|
||||||
|
|
||||||
|
// Extended packet types.
|
||||||
|
const (
|
||||||
|
// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3
|
||||||
|
PacketTypeExtended = PacketType(iota + 200)
|
||||||
|
PacketTypeExtendedReply
|
||||||
|
)
|
||||||
|
|
||||||
|
func (f PacketType) String() string {
|
||||||
|
switch f {
|
||||||
|
case PacketTypeInit:
|
||||||
|
return "SSH_FXP_INIT"
|
||||||
|
case PacketTypeVersion:
|
||||||
|
return "SSH_FXP_VERSION"
|
||||||
|
case PacketTypeOpen:
|
||||||
|
return "SSH_FXP_OPEN"
|
||||||
|
case PacketTypeClose:
|
||||||
|
return "SSH_FXP_CLOSE"
|
||||||
|
case PacketTypeRead:
|
||||||
|
return "SSH_FXP_READ"
|
||||||
|
case PacketTypeWrite:
|
||||||
|
return "SSH_FXP_WRITE"
|
||||||
|
case PacketTypeLStat:
|
||||||
|
return "SSH_FXP_LSTAT"
|
||||||
|
case PacketTypeFStat:
|
||||||
|
return "SSH_FXP_FSTAT"
|
||||||
|
case PacketTypeSetstat:
|
||||||
|
return "SSH_FXP_SETSTAT"
|
||||||
|
case PacketTypeFSetstat:
|
||||||
|
return "SSH_FXP_FSETSTAT"
|
||||||
|
case PacketTypeOpenDir:
|
||||||
|
return "SSH_FXP_OPENDIR"
|
||||||
|
case PacketTypeReadDir:
|
||||||
|
return "SSH_FXP_READDIR"
|
||||||
|
case PacketTypeRemove:
|
||||||
|
return "SSH_FXP_REMOVE"
|
||||||
|
case PacketTypeMkdir:
|
||||||
|
return "SSH_FXP_MKDIR"
|
||||||
|
case PacketTypeRmdir:
|
||||||
|
return "SSH_FXP_RMDIR"
|
||||||
|
case PacketTypeRealPath:
|
||||||
|
return "SSH_FXP_REALPATH"
|
||||||
|
case PacketTypeStat:
|
||||||
|
return "SSH_FXP_STAT"
|
||||||
|
case PacketTypeRename:
|
||||||
|
return "SSH_FXP_RENAME"
|
||||||
|
case PacketTypeReadLink:
|
||||||
|
return "SSH_FXP_READLINK"
|
||||||
|
case PacketTypeSymlink:
|
||||||
|
return "SSH_FXP_SYMLINK"
|
||||||
|
case PacketTypeV6Link:
|
||||||
|
return "SSH_FXP_LINK"
|
||||||
|
case PacketTypeV6Block:
|
||||||
|
return "SSH_FXP_BLOCK"
|
||||||
|
case PacketTypeV6Unblock:
|
||||||
|
return "SSH_FXP_UNBLOCK"
|
||||||
|
case PacketTypeStatus:
|
||||||
|
return "SSH_FXP_STATUS"
|
||||||
|
case PacketTypeHandle:
|
||||||
|
return "SSH_FXP_HANDLE"
|
||||||
|
case PacketTypeData:
|
||||||
|
return "SSH_FXP_DATA"
|
||||||
|
case PacketTypeName:
|
||||||
|
return "SSH_FXP_NAME"
|
||||||
|
case PacketTypeAttrs:
|
||||||
|
return "SSH_FXP_ATTRS"
|
||||||
|
case PacketTypeExtended:
|
||||||
|
return "SSH_FXP_EXTENDED"
|
||||||
|
case PacketTypeExtendedReply:
|
||||||
|
return "SSH_FXP_EXTENDED_REPLY"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPacketFromType(typ PacketType) (Packet, error) {
|
||||||
|
switch typ {
|
||||||
|
case PacketTypeOpen:
|
||||||
|
return new(OpenPacket), nil
|
||||||
|
case PacketTypeClose:
|
||||||
|
return new(ClosePacket), nil
|
||||||
|
case PacketTypeRead:
|
||||||
|
return new(ReadPacket), nil
|
||||||
|
case PacketTypeWrite:
|
||||||
|
return new(WritePacket), nil
|
||||||
|
case PacketTypeLStat:
|
||||||
|
return new(LStatPacket), nil
|
||||||
|
case PacketTypeFStat:
|
||||||
|
return new(FStatPacket), nil
|
||||||
|
case PacketTypeSetstat:
|
||||||
|
return new(SetstatPacket), nil
|
||||||
|
case PacketTypeFSetstat:
|
||||||
|
return new(FSetstatPacket), nil
|
||||||
|
case PacketTypeOpenDir:
|
||||||
|
return new(OpenDirPacket), nil
|
||||||
|
case PacketTypeReadDir:
|
||||||
|
return new(ReadDirPacket), nil
|
||||||
|
case PacketTypeRemove:
|
||||||
|
return new(RemovePacket), nil
|
||||||
|
case PacketTypeMkdir:
|
||||||
|
return new(MkdirPacket), nil
|
||||||
|
case PacketTypeRmdir:
|
||||||
|
return new(RmdirPacket), nil
|
||||||
|
case PacketTypeRealPath:
|
||||||
|
return new(RealPathPacket), nil
|
||||||
|
case PacketTypeStat:
|
||||||
|
return new(StatPacket), nil
|
||||||
|
case PacketTypeRename:
|
||||||
|
return new(RenamePacket), nil
|
||||||
|
case PacketTypeReadLink:
|
||||||
|
return new(ReadLinkPacket), nil
|
||||||
|
case PacketTypeSymlink:
|
||||||
|
return new(SymlinkPacket), nil
|
||||||
|
case PacketTypeExtended:
|
||||||
|
return new(ExtendedPacket), nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unexpected request packet type: %v", typ)
|
||||||
|
}
|
||||||
|
}
|
230
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go
generated
vendored
Normal file
|
@ -0,0 +1,230 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
// ClosePacket defines the SSH_FXP_CLOSE packet.
|
||||||
|
type ClosePacket struct {
|
||||||
|
Handle string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *ClosePacket) Type() PacketType {
|
||||||
|
return PacketTypeClose
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
size := 4 + len(p.Handle) // string(handle)
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeClose, reqid)
|
||||||
|
buf.AppendString(p.Handle)
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = ClosePacket{
|
||||||
|
Handle: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadPacket defines the SSH_FXP_READ packet.
|
||||||
|
type ReadPacket struct {
|
||||||
|
Handle string
|
||||||
|
Offset uint64
|
||||||
|
Length uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *ReadPacket) Type() PacketType {
|
||||||
|
return PacketTypeRead
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
// string(handle) + uint64(offset) + uint32(len)
|
||||||
|
size := 4 + len(p.Handle) + 8 + 4
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeRead, reqid)
|
||||||
|
buf.AppendString(p.Handle)
|
||||||
|
buf.AppendUint64(p.Offset)
|
||||||
|
buf.AppendUint32(p.Length)
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = ReadPacket{
|
||||||
|
Handle: buf.ConsumeString(),
|
||||||
|
Offset: buf.ConsumeUint64(),
|
||||||
|
Length: buf.ConsumeUint32(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// WritePacket defines the SSH_FXP_WRITE packet.
|
||||||
|
type WritePacket struct {
|
||||||
|
Handle string
|
||||||
|
Offset uint64
|
||||||
|
Data []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *WritePacket) Type() PacketType {
|
||||||
|
return PacketTypeWrite
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
// string(handle) + uint64(offset) + uint32(len(data)); data content in payload
|
||||||
|
size := 4 + len(p.Handle) + 8 + 4
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeWrite, reqid)
|
||||||
|
buf.AppendString(p.Handle)
|
||||||
|
buf.AppendUint64(p.Offset)
|
||||||
|
buf.AppendUint32(uint32(len(p.Data)))
|
||||||
|
|
||||||
|
return buf.Packet(p.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
//
|
||||||
|
// If p.Data is already populated, and of sufficient length to hold the data,
|
||||||
|
// then this will copy the data into that byte slice.
|
||||||
|
//
|
||||||
|
// If p.Data has a length insufficient to hold the data,
|
||||||
|
// then this will make a new slice of sufficient length, and copy the data into that.
|
||||||
|
//
|
||||||
|
// This means this _does not_ alias any of the data buffer that is passed in.
|
||||||
|
func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = WritePacket{
|
||||||
|
Handle: buf.ConsumeString(),
|
||||||
|
Offset: buf.ConsumeUint64(),
|
||||||
|
Data: buf.ConsumeByteSliceCopy(p.Data),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
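// Illustrative usage sketch (not part of the vendored source): reusing one
// WritePacket across unmarshals lets the Data backing array be reused whenever
// later payloads fit in its capacity. The nextPacketBody and writeAt names
// below are hypothetical.
//
//	var pkt WritePacket
//	for {
//		buf, err := nextPacketBody() // assumed to return a *Buffer positioned after the request-id
//		if err != nil {
//			break
//		}
//		if err := pkt.UnmarshalPacketBody(buf); err != nil {
//			break
//		}
//		writeAt(pkt.Handle, pkt.Offset, pkt.Data)
//	}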
|
||||||
|
|
||||||
|
// FStatPacket defines the SSH_FXP_FSTAT packet.
|
||||||
|
type FStatPacket struct {
|
||||||
|
Handle string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *FStatPacket) Type() PacketType {
|
||||||
|
return PacketTypeFStat
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
size := 4 + len(p.Handle) // string(handle)
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeFStat, reqid)
|
||||||
|
buf.AppendString(p.Handle)
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = FStatPacket{
|
||||||
|
Handle: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// FSetstatPacket defines the SSH_FXP_FSETSTAT packet.
|
||||||
|
type FSetstatPacket struct {
|
||||||
|
Handle string
|
||||||
|
Attrs Attributes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *FSetstatPacket) Type() PacketType {
|
||||||
|
return PacketTypeFSetstat
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
size := 4 + len(p.Handle) + p.Attrs.Len() // string(handle) + ATTRS(attrs)
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeFSetstat, reqid)
|
||||||
|
buf.AppendString(p.Handle)
|
||||||
|
|
||||||
|
p.Attrs.MarshalInto(buf)
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = FSetstatPacket{
|
||||||
|
Handle: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Attrs.UnmarshalFrom(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDirPacket defines the SSH_FXP_READDIR packet.
|
||||||
|
type ReadDirPacket struct {
|
||||||
|
Handle string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *ReadDirPacket) Type() PacketType {
|
||||||
|
return PacketTypeReadDir
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
size := 4 + len(p.Handle) // string(handle)
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeReadDir, reqid)
|
||||||
|
buf.AppendString(p.Handle)
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = ReadDirPacket{
|
||||||
|
Handle: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
99
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go
generated
vendored
Normal file
|
@ -0,0 +1,99 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
// InitPacket defines the SSH_FXP_INIT packet.
|
||||||
|
type InitPacket struct {
|
||||||
|
Version uint32
|
||||||
|
Extensions []*ExtensionPair
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns p as the binary encoding of p.
|
||||||
|
func (p *InitPacket) MarshalBinary() ([]byte, error) {
|
||||||
|
size := 1 + 4 // byte(type) + uint32(version)
|
||||||
|
|
||||||
|
for _, ext := range p.Extensions {
|
||||||
|
size += ext.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
b := NewBuffer(make([]byte, 4, 4+size))
|
||||||
|
b.AppendUint8(uint8(PacketTypeInit))
|
||||||
|
b.AppendUint32(p.Version)
|
||||||
|
|
||||||
|
for _, ext := range p.Extensions {
|
||||||
|
ext.MarshalInto(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
b.PutLength(size)
|
||||||
|
|
||||||
|
return b.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary unmarshals a full raw packet out of the given data.
|
||||||
|
// It is assumed that the uint32(length) has already been consumed to receive the data.
|
||||||
|
// It is also assumed that the uint8(type) has already been consumed in order to determine which packet type to unmarshal into.
|
||||||
|
func (p *InitPacket) UnmarshalBinary(data []byte) (err error) {
|
||||||
|
buf := NewBuffer(data)
|
||||||
|
|
||||||
|
*p = InitPacket{
|
||||||
|
Version: buf.ConsumeUint32(),
|
||||||
|
}
|
||||||
|
|
||||||
|
for buf.Len() > 0 {
|
||||||
|
var ext ExtensionPair
|
||||||
|
if err := ext.UnmarshalFrom(buf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Extensions = append(p.Extensions, &ext)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// VersionPacket defines the SSH_FXP_VERSION packet.
|
||||||
|
type VersionPacket struct {
|
||||||
|
Version uint32
|
||||||
|
Extensions []*ExtensionPair
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns p as the binary encoding of p.
|
||||||
|
func (p *VersionPacket) MarshalBinary() ([]byte, error) {
|
||||||
|
size := 1 + 4 // byte(type) + uint32(version)
|
||||||
|
|
||||||
|
for _, ext := range p.Extensions {
|
||||||
|
size += ext.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
b := NewBuffer(make([]byte, 4, 4+size))
|
||||||
|
b.AppendUint8(uint8(PacketTypeVersion))
|
||||||
|
b.AppendUint32(p.Version)
|
||||||
|
|
||||||
|
for _, ext := range p.Extensions {
|
||||||
|
ext.MarshalInto(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
b.PutLength(size)
|
||||||
|
|
||||||
|
return b.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary unmarshals a full raw packet out of the given data.
|
||||||
|
// It is assumed that the uint32(length) has already been consumed to receive the data.
|
||||||
|
// It is also assumed that the uint8(type) has already been consumed in order to determine which packet type to unmarshal into.
|
||||||
|
func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) {
|
||||||
|
buf := NewBuffer(data)
|
||||||
|
|
||||||
|
*p = VersionPacket{
|
||||||
|
Version: buf.ConsumeUint32(),
|
||||||
|
}
|
||||||
|
|
||||||
|
for buf.Len() > 0 {
|
||||||
|
var ext ExtensionPair
|
||||||
|
if err := ext.UnmarshalFrom(buf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Extensions = append(p.Extensions, &ext)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
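// Illustrative usage sketch (not part of the vendored source): encoding the
// client side of the version handshake. The protocol version below is the
// draft-ietf-secsh-filexfer-02 value.
//
//	initPkt := InitPacket{Version: 3}
//	wire, err := initPkt.MarshalBinary()
//	if err != nil {
//		// handle err
//	}
//	_ = wire // uint32(length) + byte(SSH_FXP_INIT) + uint32(version)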
|
86
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
// SSH_FXF_* flags.
|
||||||
|
const (
|
||||||
|
FlagRead = 1 << iota // SSH_FXF_READ
|
||||||
|
FlagWrite // SSH_FXF_WRITE
|
||||||
|
FlagAppend // SSH_FXF_APPEND
|
||||||
|
FlagCreate // SSH_FXF_CREAT
|
||||||
|
FlagTruncate // SSH_FXF_TRUNC
|
||||||
|
FlagExclusive // SSH_FXF_EXCL
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpenPacket defines the SSH_FXP_OPEN packet.
|
||||||
|
type OpenPacket struct {
|
||||||
|
Filename string
|
||||||
|
PFlags uint32
|
||||||
|
Attrs Attributes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *OpenPacket) Type() PacketType {
|
||||||
|
return PacketTypeOpen
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
// string(filename) + uint32(pflags) + ATTRS(attrs)
|
||||||
|
size := 4 + len(p.Filename) + 4 + p.Attrs.Len()
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeOpen, reqid)
|
||||||
|
buf.AppendString(p.Filename)
|
||||||
|
buf.AppendUint32(p.PFlags)
|
||||||
|
|
||||||
|
p.Attrs.MarshalInto(buf)
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = OpenPacket{
|
||||||
|
Filename: buf.ConsumeString(),
|
||||||
|
PFlags: buf.ConsumeUint32(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Attrs.UnmarshalFrom(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenDirPacket defines the SSH_FXP_OPENDIR packet.
|
||||||
|
type OpenDirPacket struct {
|
||||||
|
Path string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with this packet type.
|
||||||
|
func (p *OpenDirPacket) Type() PacketType {
|
||||||
|
return PacketTypeOpenDir
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
size := 4 + len(p.Path) // string(path)
|
||||||
|
buf = NewMarshalBuffer(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(PacketTypeOpenDir, reqid)
|
||||||
|
buf.AppendString(p.Path)
|
||||||
|
|
||||||
|
return buf.Packet(payload)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
|
||||||
|
// It is assumed that the uint32(request-id) has already been consumed.
|
||||||
|
func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
|
||||||
|
*p = OpenDirPacket{
|
||||||
|
Path: buf.ConsumeString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
273
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go
generated
vendored
Normal file
|
@ -0,0 +1,273 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// smallBufferSize is an initial allocation minimal capacity.
|
||||||
|
const smallBufferSize = 64
|
||||||
|
|
||||||
|
// RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02
|
||||||
|
//
|
||||||
|
// RawPacket is intended for use in clients receiving responses,
|
||||||
|
// where a response will be expected to be of a limited number of types,
|
||||||
|
// and unmarshaling unknown/unexpected response packets is unnecessary.
|
||||||
|
//
|
||||||
|
// For servers expecting to receive arbitrary request packet types,
|
||||||
|
// use RequestPacket.
|
||||||
|
//
|
||||||
|
// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3
|
||||||
|
type RawPacket struct {
|
||||||
|
PacketType PacketType
|
||||||
|
RequestID uint32
|
||||||
|
|
||||||
|
Data Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the Type field defining the SSH_FXP_xy type for this packet.
|
||||||
|
func (p *RawPacket) Type() PacketType {
|
||||||
|
return p.PacketType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset clears the pointers and reference-semantic variables of RawPacket,
|
||||||
|
// releasing underlying resources, and making them and the RawPacket suitable to be reused,
|
||||||
|
// so long as no other references have been kept.
|
||||||
|
func (p *RawPacket) Reset() {
|
||||||
|
p.Data = Buffer{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
//
|
||||||
|
// The internal p.RequestID is overridden by the reqid argument.
|
||||||
|
func (p *RawPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
buf := NewBuffer(b)
|
||||||
|
if buf.Cap() < 9 {
|
||||||
|
buf = NewMarshalBuffer(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.StartPacket(p.PacketType, reqid)
|
||||||
|
|
||||||
|
return buf.Packet(p.Data.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns p as the binary encoding of p.
|
||||||
|
//
|
||||||
|
// This is a convenience implementation primarily intended for tests,
|
||||||
|
// because it is inefficient with allocations.
|
||||||
|
func (p *RawPacket) MarshalBinary() ([]byte, error) {
|
||||||
|
return ComposePacket(p.MarshalPacket(p.RequestID, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalFrom decodes a RawPacket from the given Buffer into p.
|
||||||
|
//
|
||||||
|
// The Data field will alias the passed in Buffer,
|
||||||
|
// so the buffer passed in should not be reused before RawPacket.Reset().
|
||||||
|
func (p *RawPacket) UnmarshalFrom(buf *Buffer) error {
|
||||||
|
*p = RawPacket{
|
||||||
|
PacketType: PacketType(buf.ConsumeUint8()),
|
||||||
|
RequestID: buf.ConsumeUint32(),
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Data = *buf
|
||||||
|
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary decodes a full raw packet out of the given data.
|
||||||
|
// It is assumed that the uint32(length) has already been consumed to receive the data.
|
||||||
|
//
|
||||||
|
// This is a convenience implementation primarily intended for tests,
|
||||||
|
// because this must clone the given data byte slice,
|
||||||
|
// as Data is not allowed to alias any part of the data byte slice.
|
||||||
|
func (p *RawPacket) UnmarshalBinary(data []byte) error {
|
||||||
|
clone := make([]byte, len(data))
|
||||||
|
n := copy(clone, data)
|
||||||
|
return p.UnmarshalFrom(NewBuffer(clone[:n]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// readPacket reads a uint32 length-prefixed binary data packet from r,
|
||||||
|
// using the given byte slice as a backing array.
|
||||||
|
//
|
||||||
|
// If the packet length read from r is bigger than maxPacketLength,
|
||||||
|
// or greater than math.MaxInt32 on a 32-bit implementation,
|
||||||
|
// then an `ErrLongPacket` error will be returned.
|
||||||
|
//
|
||||||
|
// If the given byte slice is insufficient to hold the packet,
|
||||||
|
// then it will be extended to fill the packet size.
|
||||||
|
func readPacket(r io.Reader, b []byte, maxPacketLength uint32) ([]byte, error) {
|
||||||
|
if cap(b) < 4 {
|
||||||
|
// We will need to allocate our own buffer just for reading the packet length.
|
||||||
|
|
||||||
|
// However, we don’t really want to allocate an extremely narrow buffer (4-bytes),
|
||||||
|
// and cause unnecessary allocation churn from both length reads and small packet reads,
|
||||||
|
// so we use smallBufferSize from the bytes package as a reasonable guess.
|
||||||
|
|
||||||
|
// But if callers really do want to force narrow throw-away allocation of every packet body,
|
||||||
|
// they can do so with a buffer of capacity 4.
|
||||||
|
b = make([]byte, smallBufferSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.ReadFull(r, b[:4]); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
length := unmarshalUint32(b)
|
||||||
|
if int(length) < 5 {
|
||||||
|
// Must have at least uint8(type) and uint32(request-id)
|
||||||
|
|
||||||
|
if int(length) < 0 {
|
||||||
|
// Only possible when strconv.IntSize == 32,
|
||||||
|
// the packet length is longer than math.MaxInt32,
|
||||||
|
// and thus longer than any possible slice.
|
||||||
|
return nil, ErrLongPacket
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, ErrShortPacket
|
||||||
|
}
|
||||||
|
if length > maxPacketLength {
|
||||||
|
return nil, ErrLongPacket
|
||||||
|
}
|
||||||
|
|
||||||
|
if int(length) > cap(b) {
|
||||||
|
// We know int(length) must be positive, because of tests above.
|
||||||
|
b = make([]byte, length)
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := io.ReadFull(r, b[:length])
|
||||||
|
return b[:n], err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadFrom provides a simple functional packet reader,
|
||||||
|
// using the given byte slice as a backing array.
|
||||||
|
//
|
||||||
|
// To protect against potential denial of service attacks,
|
||||||
|
// if the read packet length is longer than maxPacketLength,
|
||||||
|
// then no packet data will be read, and ErrLongPacket will be returned.
|
||||||
|
// (On 32-bit int architectures, all packets >= 2^31 in length
|
||||||
|
// will return ErrLongPacket regardless of maxPacketLength.)
|
||||||
|
//
|
||||||
|
// If the read packet length is longer than cap(b),
|
||||||
|
// then a throw-away slice will be allocated to meet the exact packet length.
|
||||||
|
// This can be used to limit the length of reused buffers,
|
||||||
|
// while still allowing reception of occasional large packets.
|
||||||
|
//
|
||||||
|
// The Data field may alias the passed in byte slice,
|
||||||
|
// so the byte slice passed in should not be reused before RawPacket.Reset().
|
||||||
|
func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
|
||||||
|
b, err := readPacket(r, b, maxPacketLength)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.UnmarshalFrom(NewBuffer(b))
|
||||||
|
}
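// Illustrative usage sketch (not part of the vendored source): a client-side
// read loop that reuses one RawPacket and one backing buffer. The conn value
// and dispatch function below are hypothetical.
//
//	var raw RawPacket
//	scratch := make([]byte, DefaultMaxPacketLength)
//	for {
//		if err := raw.ReadFrom(conn, scratch, DefaultMaxPacketLength); err != nil {
//			return err
//		}
//		dispatch(raw.RequestID, raw.PacketType, raw.Data.Bytes())
//		raw.Reset() // scratch may only be reused for the next read after Reset
//	}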
|
||||||
|
|
||||||
|
// RequestPacket implements the general packet format from draft-ietf-secsh-filexfer-02
|
||||||
|
// but also automatically decodes/encodes valid request packets (2 < type < 100 || type == 200).
|
||||||
|
//
|
||||||
|
// RequestPacket is intended for use in servers receiving requests,
|
||||||
|
// where any arbitrary request may be received, and so decoding them automatically
|
||||||
|
// is useful.
|
||||||
|
//
|
||||||
|
// For clients expecting to receive specific response packet types,
|
||||||
|
// where automatic unmarshaling of the packet body does not make sense,
|
||||||
|
// use RawPacket.
|
||||||
|
//
|
||||||
|
// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3
|
||||||
|
type RequestPacket struct {
|
||||||
|
RequestID uint32
|
||||||
|
|
||||||
|
Request Packet
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type returns the SSH_FXP_xy value associated with the underlying packet.
|
||||||
|
func (p *RequestPacket) Type() PacketType {
|
||||||
|
return p.Request.Type()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset clears the pointers and reference-semantic variables in RequestPacket,
|
||||||
|
// releasing underlying resources, and making them and the RequestPacket suitable to be reused,
|
||||||
|
// so long as no other references have been kept.
|
||||||
|
func (p *RequestPacket) Reset() {
|
||||||
|
p.Request = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalPacket returns p as a two-part binary encoding of p.
|
||||||
|
//
|
||||||
|
// The internal p.RequestID is overridden by the reqid argument.
|
||||||
|
func (p *RequestPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
|
||||||
|
if p.Request == nil {
|
||||||
|
return nil, nil, errors.New("empty request packet")
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Request.MarshalPacket(reqid, b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalBinary returns p as the binary encoding of p.
|
||||||
|
//
|
||||||
|
// This is a convenience implementation primarily intended for tests,
|
||||||
|
// because it is inefficient with allocations.
|
||||||
|
func (p *RequestPacket) MarshalBinary() ([]byte, error) {
|
||||||
|
return ComposePacket(p.MarshalPacket(p.RequestID, nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalFrom decodes a RequestPacket from the given Buffer into p.
|
||||||
|
//
|
||||||
|
// The Request field may alias the passed in Buffer, (e.g. SSH_FXP_WRITE),
|
||||||
|
// so the buffer passed in should not be reused before RequestPacket.Reset().
|
||||||
|
func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error {
|
||||||
|
typ := PacketType(buf.ConsumeUint8())
|
||||||
|
if buf.Err != nil {
|
||||||
|
return buf.Err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := newPacketFromType(typ)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*p = RequestPacket{
|
||||||
|
RequestID: buf.ConsumeUint32(),
|
||||||
|
Request: req,
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Request.UnmarshalPacketBody(buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary decodes a full request packet out of the given data.
|
||||||
|
// It is assumed that the uint32(length) has already been consumed to receive the data.
|
||||||
|
//
|
||||||
|
// This is a convenience implementation primarily intended for tests,
|
||||||
|
// because this must clone the given data byte slice,
|
||||||
|
// as Request is not allowed to alias any part of the data byte slice.
|
||||||
|
func (p *RequestPacket) UnmarshalBinary(data []byte) error {
|
||||||
|
clone := make([]byte, len(data))
|
||||||
|
n := copy(clone, data)
|
||||||
|
return p.UnmarshalFrom(NewBuffer(clone[:n]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadFrom provides a simple functional packet reader,
|
||||||
|
// using the given byte slice as a backing array.
|
||||||
|
//
|
||||||
|
// To protect against potential denial of service attacks,
|
||||||
|
// if the read packet length is longer than maxPacketLength,
|
||||||
|
// then no packet data will be read, and ErrLongPacket will be returned.
|
||||||
|
// (On 32-bit int architectures, all packets >= 2^31 in length
|
||||||
|
// will return ErrLongPacket regardless of maxPacketLength.)
|
||||||
|
//
|
||||||
|
// If the read packet length is longer than cap(b),
|
||||||
|
// then a throw-away slice will be allocated to meet the exact packet length.
|
||||||
|
// This can be used to limit the length of reused buffers,
|
||||||
|
// while still allowing reception of occasional large packets.
|
||||||
|
//
|
||||||
|
// The Request field may alias the passed in byte slice,
|
||||||
|
// so the byte slice passed in should not be reused before RequestPacket.Reset().
|
||||||
|
func (p *RequestPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) error {
|
||||||
|
b, err := readPacket(r, b, maxPacketLength)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.UnmarshalFrom(NewBuffer(b))
|
||||||
|
}
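// Illustrative usage sketch (not part of the vendored source): a server-side
// read loop that decodes arbitrary request packets. The conn value and the
// handler functions below are hypothetical.
//
//	var req RequestPacket
//	scratch := make([]byte, DefaultMaxPacketLength)
//	for {
//		if err := req.ReadFrom(conn, scratch, DefaultMaxPacketLength); err != nil {
//			return err
//		}
//		switch pkt := req.Request.(type) {
//		case *OpenPacket:
//			handleOpen(req.RequestID, pkt)
//		case *ClosePacket:
//			handleClose(req.RequestID, pkt)
//		}
//		req.Reset()
//	}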
|
362
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go
generated
vendored
Normal file
|
@ -0,0 +1,362 @@
|
||||||
|
package sshfx
|
||||||
|
|
||||||
|
// LStatPacket defines the SSH_FXP_LSTAT packet.
type LStatPacket struct {
	Path string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *LStatPacket) Type() PacketType {
	return PacketTypeLStat
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) // string(path)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeLStat, reqid)
	buf.AppendString(p.Path)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = LStatPacket{
		Path: buf.ConsumeString(),
	}

	return buf.Err
}

// SetstatPacket defines the SSH_FXP_SETSTAT packet.
type SetstatPacket struct {
	Path  string
	Attrs Attributes
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *SetstatPacket) Type() PacketType {
	return PacketTypeSetstat
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeSetstat, reqid)
	buf.AppendString(p.Path)

	p.Attrs.MarshalInto(buf)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = SetstatPacket{
		Path: buf.ConsumeString(),
	}

	return p.Attrs.UnmarshalFrom(buf)
}

// RemovePacket defines the SSH_FXP_REMOVE packet.
type RemovePacket struct {
	Path string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *RemovePacket) Type() PacketType {
	return PacketTypeRemove
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) // string(path)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeRemove, reqid)
	buf.AppendString(p.Path)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = RemovePacket{
		Path: buf.ConsumeString(),
	}

	return buf.Err
}

// MkdirPacket defines the SSH_FXP_MKDIR packet.
type MkdirPacket struct {
	Path  string
	Attrs Attributes
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *MkdirPacket) Type() PacketType {
	return PacketTypeMkdir
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) + p.Attrs.Len() // string(path) + ATTRS(attrs)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeMkdir, reqid)
	buf.AppendString(p.Path)

	p.Attrs.MarshalInto(buf)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = MkdirPacket{
		Path: buf.ConsumeString(),
	}

	return p.Attrs.UnmarshalFrom(buf)
}

// RmdirPacket defines the SSH_FXP_RMDIR packet.
type RmdirPacket struct {
	Path string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *RmdirPacket) Type() PacketType {
	return PacketTypeRmdir
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) // string(path)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeRmdir, reqid)
	buf.AppendString(p.Path)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = RmdirPacket{
		Path: buf.ConsumeString(),
	}

	return buf.Err
}

// RealPathPacket defines the SSH_FXP_REALPATH packet.
type RealPathPacket struct {
	Path string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *RealPathPacket) Type() PacketType {
	return PacketTypeRealPath
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) // string(path)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeRealPath, reqid)
	buf.AppendString(p.Path)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = RealPathPacket{
		Path: buf.ConsumeString(),
	}

	return buf.Err
}

// StatPacket defines the SSH_FXP_STAT packet.
type StatPacket struct {
	Path string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *StatPacket) Type() PacketType {
	return PacketTypeStat
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) // string(path)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeStat, reqid)
	buf.AppendString(p.Path)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = StatPacket{
		Path: buf.ConsumeString(),
	}

	return buf.Err
}

// RenamePacket defines the SSH_FXP_RENAME packet.
type RenamePacket struct {
	OldPath string
	NewPath string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *RenamePacket) Type() PacketType {
	return PacketTypeRename
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		// string(oldpath) + string(newpath)
		size := 4 + len(p.OldPath) + 4 + len(p.NewPath)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeRename, reqid)
	buf.AppendString(p.OldPath)
	buf.AppendString(p.NewPath)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = RenamePacket{
		OldPath: buf.ConsumeString(),
		NewPath: buf.ConsumeString(),
	}

	return buf.Err
}

// ReadLinkPacket defines the SSH_FXP_READLINK packet.
type ReadLinkPacket struct {
	Path string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *ReadLinkPacket) Type() PacketType {
	return PacketTypeReadLink
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Path) // string(path)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeReadLink, reqid)
	buf.AppendString(p.Path)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = ReadLinkPacket{
		Path: buf.ConsumeString(),
	}

	return buf.Err
}

// SymlinkPacket defines the SSH_FXP_SYMLINK packet.
//
// The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed.
// Unfortunately, the reversal was not noticed until the server was widely deployed.
// Covered in Section 4.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL
type SymlinkPacket struct {
	LinkPath   string
	TargetPath string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *SymlinkPacket) Type() PacketType {
	return PacketTypeSymlink
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		// string(targetpath) + string(linkpath)
		size := 4 + len(p.TargetPath) + 4 + len(p.LinkPath)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeSymlink, reqid)

	// Arguments were inadvertently reversed.
	buf.AppendString(p.TargetPath)
	buf.AppendString(p.LinkPath)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = SymlinkPacket{
		// Arguments were inadvertently reversed.
		TargetPath: buf.ConsumeString(),
		LinkPath:   buf.ConsumeString(),
	}

	return buf.Err
}
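All of the request packets above share the same Buffer-based marshaling scheme, so here is a minimal sketch of driving one of them directly. Note this package lives under `internal/` and can only be imported from inside github.com/pkg/sftp itself, so the snippet is illustrative rather than something Idun could compile as-is; the request id 42 and the path are arbitrary.

```go
package main

import (
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func main() {
	// Path-only request packets all marshal the same way.
	p := &sshfx.LStatPacket{Path: "/var/backups/db.tar.gz"}

	// 42 is an arbitrary request id; passing nil lets MarshalPacket
	// allocate its own marshal buffer.
	header, payload, err := p.MarshalPacket(42, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(header), len(payload))
}
```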
114
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
package sshfx

// FileMode represents a file’s mode and permission bits.
// The bits are defined according to POSIX standards,
// and may not apply to the OS being built for.
type FileMode uint32

// Permission flags, defined here to avoid potential inconsistencies in individual OS implementations.
const (
	ModePerm       FileMode = 0o0777 // S_IRWXU | S_IRWXG | S_IRWXO
	ModeUserRead   FileMode = 0o0400 // S_IRUSR
	ModeUserWrite  FileMode = 0o0200 // S_IWUSR
	ModeUserExec   FileMode = 0o0100 // S_IXUSR
	ModeGroupRead  FileMode = 0o0040 // S_IRGRP
	ModeGroupWrite FileMode = 0o0020 // S_IWGRP
	ModeGroupExec  FileMode = 0o0010 // S_IXGRP
	ModeOtherRead  FileMode = 0o0004 // S_IROTH
	ModeOtherWrite FileMode = 0o0002 // S_IWOTH
	ModeOtherExec  FileMode = 0o0001 // S_IXOTH

	ModeSetUID FileMode = 0o4000 // S_ISUID
	ModeSetGID FileMode = 0o2000 // S_ISGID
	ModeSticky FileMode = 0o1000 // S_ISVTX

	ModeType       FileMode = 0xF000 // S_IFMT
	ModeNamedPipe  FileMode = 0x1000 // S_IFIFO
	ModeCharDevice FileMode = 0x2000 // S_IFCHR
	ModeDir        FileMode = 0x4000 // S_IFDIR
	ModeDevice     FileMode = 0x6000 // S_IFBLK
	ModeRegular    FileMode = 0x8000 // S_IFREG
	ModeSymlink    FileMode = 0xA000 // S_IFLNK
	ModeSocket     FileMode = 0xC000 // S_IFSOCK
)

// IsDir reports whether m describes a directory.
// That is, it tests for m.Type() == ModeDir.
func (m FileMode) IsDir() bool {
	return (m & ModeType) == ModeDir
}

// IsRegular reports whether m describes a regular file.
// That is, it tests for m.Type() == ModeRegular
func (m FileMode) IsRegular() bool {
	return (m & ModeType) == ModeRegular
}

// Perm returns the POSIX permission bits in m (m & ModePerm).
func (m FileMode) Perm() FileMode {
	return (m & ModePerm)
}

// Type returns the type bits in m (m & ModeType).
func (m FileMode) Type() FileMode {
	return (m & ModeType)
}

// String returns a `-rwxrwxrwx` style string representing the `ls -l` POSIX permissions string.
func (m FileMode) String() string {
	var buf [10]byte

	switch m.Type() {
	case ModeRegular:
		buf[0] = '-'
	case ModeDir:
		buf[0] = 'd'
	case ModeSymlink:
		buf[0] = 'l'
	case ModeDevice:
		buf[0] = 'b'
	case ModeCharDevice:
		buf[0] = 'c'
	case ModeNamedPipe:
		buf[0] = 'p'
	case ModeSocket:
		buf[0] = 's'
	default:
		buf[0] = '?'
	}

	const rwx = "rwxrwxrwx"
	for i, c := range rwx {
		if m&(1<<uint(9-1-i)) != 0 {
			buf[i+1] = byte(c)
		} else {
			buf[i+1] = '-'
		}
	}

	if m&ModeSetUID != 0 {
		if buf[3] == 'x' {
			buf[3] = 's'
		} else {
			buf[3] = 'S'
		}
	}

	if m&ModeSetGID != 0 {
		if buf[6] == 'x' {
			buf[6] = 's'
		} else {
			buf[6] = 'S'
		}
	}

	if m&ModeSticky != 0 {
		if buf[9] == 'x' {
			buf[9] = 't'
		} else {
			buf[9] = 'T'
		}
	}

	return string(buf[:])
}
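FileMode.String() above renders the familiar `ls -l` permission column, including the setuid/setgid/sticky substitutions. A small sketch of what it produces; again, the package is internal to github.com/pkg/sftp, so this is purely illustrative.

```go
package main

import (
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func main() {
	dir := sshfx.ModeDir | sshfx.FileMode(0o755)
	fmt.Println(dir) // drwxr-xr-x

	setuid := sshfx.ModeRegular | sshfx.ModeSetUID | sshfx.FileMode(0o755)
	fmt.Println(setuid) // -rwsr-xr-x
}
```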
230
vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go
generated
vendored
Normal file
@ -0,0 +1,230 @@
package sshfx

import (
	"fmt"
)

// StatusPacket defines the SSH_FXP_STATUS packet.
//
// Specified in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-7
type StatusPacket struct {
	StatusCode   Status
	ErrorMessage string
	LanguageTag  string
}

// Error makes StatusPacket an error type.
func (p *StatusPacket) Error() string {
	if p.ErrorMessage == "" {
		return "sftp: " + p.StatusCode.String()
	}

	return fmt.Sprintf("sftp: %s: %q", p.StatusCode, p.ErrorMessage)
}

// Is returns true if target is a StatusPacket with the same StatusCode,
// or target is a Status code which is the same as SatusCode.
func (p *StatusPacket) Is(target error) bool {
	if target, ok := target.(*StatusPacket); ok {
		return p.StatusCode == target.StatusCode
	}

	return p.StatusCode == target
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *StatusPacket) Type() PacketType {
	return PacketTypeStatus
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *StatusPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		// uint32(error/status code) + string(error message) + string(language tag)
		size := 4 + 4 + len(p.ErrorMessage) + 4 + len(p.LanguageTag)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeStatus, reqid)
	buf.AppendUint32(uint32(p.StatusCode))
	buf.AppendString(p.ErrorMessage)
	buf.AppendString(p.LanguageTag)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *StatusPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = StatusPacket{
		StatusCode:   Status(buf.ConsumeUint32()),
		ErrorMessage: buf.ConsumeString(),
		LanguageTag:  buf.ConsumeString(),
	}

	return buf.Err
}

// HandlePacket defines the SSH_FXP_HANDLE packet.
type HandlePacket struct {
	Handle string
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *HandlePacket) Type() PacketType {
	return PacketTypeHandle
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *HandlePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 + len(p.Handle) // string(handle)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeHandle, reqid)
	buf.AppendString(p.Handle)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *HandlePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = HandlePacket{
		Handle: buf.ConsumeString(),
	}

	return buf.Err
}

// DataPacket defines the SSH_FXP_DATA packet.
type DataPacket struct {
	Data []byte
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *DataPacket) Type() PacketType {
	return PacketTypeData
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *DataPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 // uint32(len(data)); data content in payload
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeData, reqid)
	buf.AppendUint32(uint32(len(p.Data)))

	return buf.Packet(p.Data)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
//
// If p.Data is already populated, and of sufficient length to hold the data,
// then this will copy the data into that byte slice.
//
// If p.Data has a length insufficient to hold the data,
// then this will make a new slice of sufficient length, and copy the data into that.
//
// This means this _does not_ alias any of the data buffer that is passed in.
func (p *DataPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	*p = DataPacket{
		Data: buf.ConsumeByteSliceCopy(p.Data),
	}

	return buf.Err
}

// NamePacket defines the SSH_FXP_NAME packet.
type NamePacket struct {
	Entries []*NameEntry
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *NamePacket) Type() PacketType {
	return PacketTypeName
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *NamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := 4 // uint32(len(entries))

		for _, e := range p.Entries {
			size += e.Len()
		}

		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeName, reqid)
	buf.AppendUint32(uint32(len(p.Entries)))

	for _, e := range p.Entries {
		e.MarshalInto(buf)
	}

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *NamePacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	count := buf.ConsumeCount()
	if buf.Err != nil {
		return buf.Err
	}

	*p = NamePacket{
		Entries: make([]*NameEntry, 0, count),
	}

	for i := 0; i < count; i++ {
		var e NameEntry
		if err := e.UnmarshalFrom(buf); err != nil {
			return err
		}

		p.Entries = append(p.Entries, &e)
	}

	return buf.Err
}

// AttrsPacket defines the SSH_FXP_ATTRS packet.
type AttrsPacket struct {
	Attrs Attributes
}

// Type returns the SSH_FXP_xy value associated with this packet type.
func (p *AttrsPacket) Type() PacketType {
	return PacketTypeAttrs
}

// MarshalPacket returns p as a two-part binary encoding of p.
func (p *AttrsPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []byte, err error) {
	buf := NewBuffer(b)
	if buf.Cap() < 9 {
		size := p.Attrs.Len() // ATTRS(attrs)
		buf = NewMarshalBuffer(size)
	}

	buf.StartPacket(PacketTypeAttrs, reqid)
	p.Attrs.MarshalInto(buf)

	return buf.Packet(payload)
}

// UnmarshalPacketBody unmarshals the packet body from the given Buffer.
// It is assumed that the uint32(request-id) has already been consumed.
func (p *AttrsPacket) UnmarshalPacketBody(buf *Buffer) (err error) {
	return p.Attrs.UnmarshalFrom(buf)
}
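StatusPacket doubles as an error value: Error() formats the code and message, and Is() lets errors.Is match on the status code alone. A short sketch; the concrete Status value 2 is chosen arbitrarily for illustration, and the package is internal to github.com/pkg/sftp.

```go
package main

import (
	"errors"
	"fmt"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func main() {
	err := &sshfx.StatusPacket{
		StatusCode:   sshfx.Status(2), // arbitrary code, for illustration only
		ErrorMessage: "no such file",
	}

	fmt.Println(err.Error()) // sftp: <status>: "no such file"

	// Is() matches any *StatusPacket carrying the same StatusCode,
	// regardless of the message text.
	same := &sshfx.StatusPacket{StatusCode: sshfx.Status(2)}
	fmt.Println(errors.Is(err, same)) // true
}
```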
88
vendor/github.com/pkg/sftp/ls_formatting.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
package sftp

import (
	"errors"
	"fmt"
	"os"
	"os/user"
	"strconv"
	"time"

	sshfx "github.com/pkg/sftp/internal/encoding/ssh/filexfer"
)

func lsFormatID(id uint32) string {
	return strconv.FormatUint(uint64(id), 10)
}

type osIDLookup struct{}

func (osIDLookup) Filelist(*Request) (ListerAt, error) {
	return nil, errors.New("unimplemented stub")
}

func (osIDLookup) LookupUserName(uid string) string {
	u, err := user.LookupId(uid)
	if err != nil {
		return uid
	}

	return u.Username
}

func (osIDLookup) LookupGroupName(gid string) string {
	g, err := user.LookupGroupId(gid)
	if err != nil {
		return gid
	}

	return g.Name
}

// runLs formats the FileInfo as per `ls -l` style, which is in the 'longname' field of a SSH_FXP_NAME entry.
// This is a fairly simple implementation, just enough to look close to openssh in simple cases.
func runLs(idLookup NameLookupFileLister, dirent os.FileInfo) string {
	// example from openssh sftp server:
	// crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd
	// format:
	// {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name

	symPerms := sshfx.FileMode(fromFileMode(dirent.Mode())).String()

	var numLinks uint64 = 1
	uid, gid := "0", "0"

	switch sys := dirent.Sys().(type) {
	case *sshfx.Attributes:
		uid = lsFormatID(sys.UID)
		gid = lsFormatID(sys.GID)
	case *FileStat:
		uid = lsFormatID(sys.UID)
		gid = lsFormatID(sys.GID)
	default:
		if fiExt, ok := dirent.(FileInfoUidGid); ok {
			uid = lsFormatID(fiExt.Uid())
			gid = lsFormatID(fiExt.Gid())

			break
		}

		numLinks, uid, gid = lsLinksUIDGID(dirent)
	}

	if idLookup != nil {
		uid, gid = idLookup.LookupUserName(uid), idLookup.LookupGroupName(gid)
	}

	mtime := dirent.ModTime()
	date := mtime.Format("Jan 2")

	var yearOrTime string
	if mtime.Before(time.Now().AddDate(0, -6, 0)) {
		yearOrTime = mtime.Format("2006")
	} else {
		yearOrTime = mtime.Format("15:04")
	}

	return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %5s %s", symPerms, numLinks, uid, gid, dirent.Size(), date, yearOrTime, dirent.Name())
}
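runLs builds the longname column from the pieces above: a permission string, link count, owner, group, size, and a date that switches from clock time to year once the file is older than six months. A small stand-alone sketch of the same format string and date rule, with made-up values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same verb layout as runLs above; the values themselves are made up.
	mtime := time.Date(2023, time.July, 31, 20, 52, 0, 0, time.UTC)

	// Older than six months: print the year instead of the clock time.
	yearOrTime := mtime.Format("15:04")
	if mtime.Before(time.Now().AddDate(0, -6, 0)) {
		yearOrTime = mtime.Format("2006")
	}

	fmt.Printf("%s %4d %-8s %-8s %8d %s %5s %s\n",
		"-rw-r--r--", 1, "root", "wheel", 4096,
		mtime.Format("Jan 2"), yearOrTime, "backup.tar.gz")
}
```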
22
vendor/github.com/pkg/sftp/ls_plan9.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
//go:build plan9
// +build plan9

package sftp

import (
	"os"
	"syscall"
)

func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) {
	numLinks = 1
	uid, gid = "0", "0"

	switch sys := fi.Sys().(type) {
	case *syscall.Dir:
		uid = sys.Uid
		gid = sys.Gid
	}

	return numLinks, uid, gid
}
12
vendor/github.com/pkg/sftp/ls_stub.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
//go:build windows || android
// +build windows android

package sftp

import (
	"os"
)

func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) {
	return 1, "0", "0"
}
24
vendor/github.com/pkg/sftp/ls_unix.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
//go:build aix || darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || js
// +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris js

package sftp

import (
	"os"
	"syscall"
)

func lsLinksUIDGID(fi os.FileInfo) (numLinks uint64, uid, gid string) {
	numLinks = 1
	uid, gid = "0", "0"

	switch sys := fi.Sys().(type) {
	case *syscall.Stat_t:
		numLinks = uint64(sys.Nlink)
		uid = lsFormatID(sys.Uid)
		gid = lsFormatID(sys.Gid)
	default:
	}

	return numLinks, uid, gid
}
137
vendor/github.com/pkg/sftp/match.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
package sftp

import (
	"path"
	"strings"
)

// ErrBadPattern indicates a globbing pattern was malformed.
var ErrBadPattern = path.ErrBadPattern

// Match reports whether name matches the shell pattern.
//
// This is an alias for path.Match from the standard library,
// offered so that callers need not import the path package.
// For details, see https://golang.org/pkg/path/#Match.
func Match(pattern, name string) (matched bool, err error) {
	return path.Match(pattern, name)
}

// detect if byte(char) is path separator
func isPathSeparator(c byte) bool {
	return c == '/'
}

// Split splits the path p immediately following the final slash,
// separating it into a directory and file name component.
//
// This is an alias for path.Split from the standard library,
// offered so that callers need not import the path package.
// For details, see https://golang.org/pkg/path/#Split.
func Split(p string) (dir, file string) {
	return path.Split(p)
}

// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed.
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
func (c *Client) Glob(pattern string) (matches []string, err error) {
	if !hasMeta(pattern) {
		file, err := c.Lstat(pattern)
		if err != nil {
			return nil, nil
		}
		dir, _ := Split(pattern)
		dir = cleanGlobPath(dir)
		return []string{Join(dir, file.Name())}, nil
	}

	dir, file := Split(pattern)
	dir = cleanGlobPath(dir)

	if !hasMeta(dir) {
		return c.glob(dir, file, nil)
	}

	// Prevent infinite recursion. See issue 15879.
	if dir == pattern {
		return nil, ErrBadPattern
	}

	var m []string
	m, err = c.Glob(dir)
	if err != nil {
		return
	}
	for _, d := range m {
		matches, err = c.glob(d, file, matches)
		if err != nil {
			return
		}
	}
	return
}

// cleanGlobPath prepares path for glob matching.
func cleanGlobPath(path string) string {
	switch path {
	case "":
		return "."
	case "/":
		return path
	default:
		return path[0 : len(path)-1] // chop off trailing separator
	}
}

// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func (c *Client) glob(dir, pattern string, matches []string) (m []string, e error) {
	m = matches
	fi, err := c.Stat(dir)
	if err != nil {
		return
	}
	if !fi.IsDir() {
		return
	}
	names, err := c.ReadDir(dir)
	if err != nil {
		return
	}
	//sort.Strings(names)

	for _, n := range names {
		matched, err := Match(pattern, n.Name())
		if err != nil {
			return m, err
		}
		if matched {
			m = append(m, Join(dir, n.Name()))
		}
	}
	return
}

// Join joins any number of path elements into a single path, separating
// them with slashes.
//
// This is an alias for path.Join from the standard library,
// offered so that callers need not import the path package.
// For details, see https://golang.org/pkg/path/#Join.
func Join(elem ...string) string {
	return path.Join(elem...)
}

// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
	return strings.ContainsAny(path, "\\*?[")
}
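Client.Glob is the part of this vendored API that a backup-cleanup tool like Idun is most likely to call directly. A hedged sketch, assuming `client` is an already-connected *sftp.Client and that the backup directory and pattern are only examples:

```go
package cleanup

import (
	"fmt"

	"github.com/pkg/sftp"
)

// listBackups assumes client is already connected (for example built with
// sftp.NewClient over an established SSH connection); the pattern is an example.
func listBackups(client *sftp.Client) ([]string, error) {
	// Glob only ever returns ErrBadPattern; I/O errors while reading
	// directories are silently ignored, as documented above.
	matches, err := client.Glob("/var/backups/*.tar.gz")
	if err != nil {
		return nil, err
	}
	for _, name := range matches {
		fmt.Println("candidate backup:", name)
	}
	return matches, nil
}
```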
216
vendor/github.com/pkg/sftp/packet-manager.go
generated
vendored
Normal file
@ -0,0 +1,216 @@
package sftp

import (
	"encoding"
	"sort"
	"sync"
)

// The goal of the packetManager is to keep the outgoing packets in the same
// order as the incoming as is requires by section 7 of the RFC.

type packetManager struct {
	requests    chan orderedPacket
	responses   chan orderedPacket
	fini        chan struct{}
	incoming    orderedPackets
	outgoing    orderedPackets
	sender      packetSender // connection object
	working     *sync.WaitGroup
	packetCount uint32
	// it is not nil if the allocator is enabled
	alloc *allocator
}

type packetSender interface {
	sendPacket(encoding.BinaryMarshaler) error
}

func newPktMgr(sender packetSender) *packetManager {
	s := &packetManager{
		requests:  make(chan orderedPacket, SftpServerWorkerCount),
		responses: make(chan orderedPacket, SftpServerWorkerCount),
		fini:      make(chan struct{}),
		incoming:  make([]orderedPacket, 0, SftpServerWorkerCount),
		outgoing:  make([]orderedPacket, 0, SftpServerWorkerCount),
		sender:    sender,
		working:   &sync.WaitGroup{},
	}
	go s.controller()
	return s
}

// // packet ordering
func (s *packetManager) newOrderID() uint32 {
	s.packetCount++
	return s.packetCount
}

// returns the next orderID without incrementing it.
// This is used before receiving a new packet, with the allocator enabled, to associate
// the slice allocated for the received packet with the orderID that will be used to mark
// the allocated slices for reuse once the request is served
func (s *packetManager) getNextOrderID() uint32 {
	return s.packetCount + 1
}

type orderedRequest struct {
	requestPacket
	orderid uint32
}

func (s *packetManager) newOrderedRequest(p requestPacket) orderedRequest {
	return orderedRequest{requestPacket: p, orderid: s.newOrderID()}
}
func (p orderedRequest) orderID() uint32       { return p.orderid }
func (p orderedRequest) setOrderID(oid uint32) { p.orderid = oid }

type orderedResponse struct {
	responsePacket
	orderid uint32
}

func (s *packetManager) newOrderedResponse(p responsePacket, id uint32,
) orderedResponse {
	return orderedResponse{responsePacket: p, orderid: id}
}
func (p orderedResponse) orderID() uint32       { return p.orderid }
func (p orderedResponse) setOrderID(oid uint32) { p.orderid = oid }

type orderedPacket interface {
	id() uint32
	orderID() uint32
}
type orderedPackets []orderedPacket

func (o orderedPackets) Sort() {
	sort.Slice(o, func(i, j int) bool {
		return o[i].orderID() < o[j].orderID()
	})
}

// // packet registry
// register incoming packets to be handled
func (s *packetManager) incomingPacket(pkt orderedRequest) {
	s.working.Add(1)
	s.requests <- pkt
}

// register outgoing packets as being ready
func (s *packetManager) readyPacket(pkt orderedResponse) {
	s.responses <- pkt
	s.working.Done()
}

// shut down packetManager controller
func (s *packetManager) close() {
	// pause until current packets are processed
	s.working.Wait()
	close(s.fini)
}

// Passed a worker function, returns a channel for incoming packets.
// Keep process packet responses in the order they are received while
// maximizing throughput of file transfers.
func (s *packetManager) workerChan(runWorker func(chan orderedRequest),
) chan orderedRequest {
	// multiple workers for faster read/writes
	rwChan := make(chan orderedRequest, SftpServerWorkerCount)
	for i := 0; i < SftpServerWorkerCount; i++ {
		runWorker(rwChan)
	}

	// single worker to enforce sequential processing of everything else
	cmdChan := make(chan orderedRequest)
	runWorker(cmdChan)

	pktChan := make(chan orderedRequest, SftpServerWorkerCount)
	go func() {
		for pkt := range pktChan {
			switch pkt.requestPacket.(type) {
			case *sshFxpReadPacket, *sshFxpWritePacket:
				s.incomingPacket(pkt)
				rwChan <- pkt
				continue
			case *sshFxpClosePacket:
				// wait for reads/writes to finish when file is closed
				// incomingPacket() call must occur after this
				s.working.Wait()
			}
			s.incomingPacket(pkt)
			// all non-RW use sequential cmdChan
			cmdChan <- pkt
		}
		close(rwChan)
		close(cmdChan)
		s.close()
	}()

	return pktChan
}

// process packets
func (s *packetManager) controller() {
	for {
		select {
		case pkt := <-s.requests:
			debug("incoming id (oid): %v (%v)", pkt.id(), pkt.orderID())
			s.incoming = append(s.incoming, pkt)
			s.incoming.Sort()
		case pkt := <-s.responses:
			debug("outgoing id (oid): %v (%v)", pkt.id(), pkt.orderID())
			s.outgoing = append(s.outgoing, pkt)
			s.outgoing.Sort()
		case <-s.fini:
			return
		}
		s.maybeSendPackets()
	}
}

// send as many packets as are ready
func (s *packetManager) maybeSendPackets() {
	for {
		if len(s.outgoing) == 0 || len(s.incoming) == 0 {
			debug("break! -- outgoing: %v; incoming: %v",
				len(s.outgoing), len(s.incoming))
			break
		}
		out := s.outgoing[0]
		in := s.incoming[0]
		// debug("incoming: %v", ids(s.incoming))
		// debug("outgoing: %v", ids(s.outgoing))
		if in.orderID() == out.orderID() {
			debug("Sending packet: %v", out.id())
			s.sender.sendPacket(out.(encoding.BinaryMarshaler))
			if s.alloc != nil {
				// mark for reuse the slices allocated for this request
				s.alloc.ReleasePages(in.orderID())
			}
			// pop off heads
			copy(s.incoming, s.incoming[1:])            // shift left
			s.incoming[len(s.incoming)-1] = nil         // clear last
			s.incoming = s.incoming[:len(s.incoming)-1] // remove last
			copy(s.outgoing, s.outgoing[1:])            // shift left
			s.outgoing[len(s.outgoing)-1] = nil         // clear last
			s.outgoing = s.outgoing[:len(s.outgoing)-1] // remove last
		} else {
			break
		}
	}
}

// func oids(o []orderedPacket) []uint32 {
// 	res := make([]uint32, 0, len(o))
// 	for _, v := range o {
// 		res = append(res, v.orderId())
// 	}
// 	return res
// }
// func ids(o []orderedPacket) []uint32 {
// 	res := make([]uint32, 0, len(o))
// 	for _, v := range o {
// 		res = append(res, v.id())
// 	}
// 	return res
// }
135
vendor/github.com/pkg/sftp/packet-typing.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
package sftp

import (
	"encoding"
	"fmt"
)

// all incoming packets
type requestPacket interface {
	encoding.BinaryUnmarshaler
	id() uint32
}

type responsePacket interface {
	encoding.BinaryMarshaler
	id() uint32
}

// interfaces to group types
type hasPath interface {
	requestPacket
	getPath() string
}

type hasHandle interface {
	requestPacket
	getHandle() string
}

type notReadOnly interface {
	notReadOnly()
}

// // define types by adding methods
// hasPath
func (p *sshFxpLstatPacket) getPath() string    { return p.Path }
func (p *sshFxpStatPacket) getPath() string     { return p.Path }
func (p *sshFxpRmdirPacket) getPath() string    { return p.Path }
func (p *sshFxpReadlinkPacket) getPath() string { return p.Path }
func (p *sshFxpRealpathPacket) getPath() string { return p.Path }
func (p *sshFxpMkdirPacket) getPath() string    { return p.Path }
func (p *sshFxpSetstatPacket) getPath() string  { return p.Path }
func (p *sshFxpStatvfsPacket) getPath() string  { return p.Path }
func (p *sshFxpRemovePacket) getPath() string   { return p.Filename }
func (p *sshFxpRenamePacket) getPath() string   { return p.Oldpath }
func (p *sshFxpSymlinkPacket) getPath() string  { return p.Targetpath }
func (p *sshFxpOpendirPacket) getPath() string  { return p.Path }
func (p *sshFxpOpenPacket) getPath() string     { return p.Path }

func (p *sshFxpExtendedPacketPosixRename) getPath() string { return p.Oldpath }
func (p *sshFxpExtendedPacketHardlink) getPath() string    { return p.Oldpath }

// getHandle
func (p *sshFxpFstatPacket) getHandle() string    { return p.Handle }
func (p *sshFxpFsetstatPacket) getHandle() string { return p.Handle }
func (p *sshFxpReadPacket) getHandle() string     { return p.Handle }
func (p *sshFxpWritePacket) getHandle() string    { return p.Handle }
func (p *sshFxpReaddirPacket) getHandle() string  { return p.Handle }
func (p *sshFxpClosePacket) getHandle() string    { return p.Handle }

// notReadOnly
func (p *sshFxpWritePacket) notReadOnly()               {}
func (p *sshFxpSetstatPacket) notReadOnly()             {}
func (p *sshFxpFsetstatPacket) notReadOnly()            {}
func (p *sshFxpRemovePacket) notReadOnly()              {}
func (p *sshFxpMkdirPacket) notReadOnly()               {}
func (p *sshFxpRmdirPacket) notReadOnly()               {}
func (p *sshFxpRenamePacket) notReadOnly()              {}
func (p *sshFxpSymlinkPacket) notReadOnly()             {}
func (p *sshFxpExtendedPacketPosixRename) notReadOnly() {}
func (p *sshFxpExtendedPacketHardlink) notReadOnly()    {}

// some packets with ID are missing id()
func (p *sshFxpDataPacket) id() uint32   { return p.ID }
func (p *sshFxpStatusPacket) id() uint32 { return p.ID }
func (p *sshFxpStatResponse) id() uint32 { return p.ID }
func (p *sshFxpNamePacket) id() uint32   { return p.ID }
func (p *sshFxpHandlePacket) id() uint32 { return p.ID }
func (p *StatVFS) id() uint32            { return p.ID }
func (p *sshFxVersionPacket) id() uint32 { return 0 }

// take raw incoming packet data and build packet objects
func makePacket(p rxPacket) (requestPacket, error) {
	var pkt requestPacket
	switch p.pktType {
	case sshFxpInit:
		pkt = &sshFxInitPacket{}
	case sshFxpLstat:
		pkt = &sshFxpLstatPacket{}
	case sshFxpOpen:
		pkt = &sshFxpOpenPacket{}
	case sshFxpClose:
		pkt = &sshFxpClosePacket{}
	case sshFxpRead:
		pkt = &sshFxpReadPacket{}
	case sshFxpWrite:
		pkt = &sshFxpWritePacket{}
	case sshFxpFstat:
		pkt = &sshFxpFstatPacket{}
	case sshFxpSetstat:
		pkt = &sshFxpSetstatPacket{}
	case sshFxpFsetstat:
		pkt = &sshFxpFsetstatPacket{}
	case sshFxpOpendir:
		pkt = &sshFxpOpendirPacket{}
	case sshFxpReaddir:
		pkt = &sshFxpReaddirPacket{}
	case sshFxpRemove:
		pkt = &sshFxpRemovePacket{}
	case sshFxpMkdir:
		pkt = &sshFxpMkdirPacket{}
	case sshFxpRmdir:
		pkt = &sshFxpRmdirPacket{}
	case sshFxpRealpath:
		pkt = &sshFxpRealpathPacket{}
	case sshFxpStat:
		pkt = &sshFxpStatPacket{}
	case sshFxpRename:
		pkt = &sshFxpRenamePacket{}
	case sshFxpReadlink:
		pkt = &sshFxpReadlinkPacket{}
	case sshFxpSymlink:
		pkt = &sshFxpSymlinkPacket{}
	case sshFxpExtended:
		pkt = &sshFxpExtendedPacket{}
	default:
		return nil, fmt.Errorf("unhandled packet type: %s", p.pktType)
	}
	if err := pkt.UnmarshalBinary(p.pktBytes); err != nil {
		// Return partially unpacked packet to allow callers to return
		// error messages appropriately with necessary id() method.
		return pkt, err
	}
	return pkt, nil
}
1295
vendor/github.com/pkg/sftp/packet.go
generated
vendored
Normal file
File diff suppressed because it is too large
79
vendor/github.com/pkg/sftp/pool.go
generated
vendored
Normal file
@ -0,0 +1,79 @@
package sftp

// bufPool provides a pool of byte-slices to be reused in various parts of the package.
// It is safe to use concurrently through a pointer.
type bufPool struct {
	ch   chan []byte
	blen int
}

func newBufPool(depth, bufLen int) *bufPool {
	return &bufPool{
		ch:   make(chan []byte, depth),
		blen: bufLen,
	}
}

func (p *bufPool) Get() []byte {
	if p.blen <= 0 {
		panic("bufPool: new buffer creation length must be greater than zero")
	}

	for {
		select {
		case b := <-p.ch:
			if cap(b) < p.blen {
				// just in case: throw away any buffer with insufficient capacity.
				continue
			}

			return b[:p.blen]

		default:
			return make([]byte, p.blen)
		}
	}
}

func (p *bufPool) Put(b []byte) {
	if p == nil {
		// functional default: no reuse.
		return
	}

	if cap(b) < p.blen || cap(b) > p.blen*2 {
		// DO NOT reuse buffers with insufficient capacity.
		// This could cause panics when resizing to p.blen.

		// DO NOT reuse buffers with excessive capacity.
		// This could cause memory leaks.
		return
	}

	select {
	case p.ch <- b:
	default:
	}
}

type resChanPool chan chan result

func newResChanPool(depth int) resChanPool {
	return make(chan chan result, depth)
}

func (p resChanPool) Get() chan result {
	select {
	case ch := <-p:
		return ch
	default:
		return make(chan result, 1)
	}
}

func (p resChanPool) Put(ch chan result) {
	select {
	case p <- ch:
	default:
	}
}
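bufPool and resChanPool are unexported, so they are only usable from inside the sftp package itself. A hypothetical in-package sketch of the Get/Put cycle; the function name and buffer sizes are made up for illustration:

```go
package sftp

// exampleBufPoolUse is a hypothetical helper (not part of the commit) showing
// how the pool above is driven: a pool of up to 64 reusable 32 KiB buffers.
func exampleBufPoolUse() []byte {
	pool := newBufPool(64, 32*1024)

	b := pool.Get() // always returns a slice of length 32*1024
	// ... fill b from the wire ...

	pool.Put(b) // kept for reuse if the channel has room, dropped otherwise
	return b
}
```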
6
vendor/github.com/pkg/sftp/release.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
//go:build !debug
// +build !debug

package sftp

func debug(fmt string, args ...interface{}) {}
63
vendor/github.com/pkg/sftp/request-attrs.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
package sftp

// Methods on the Request object to make working with the Flags bitmasks and
// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write
// request and AttrFlags() and Attributes() when working with SetStat requests.
import "os"

// FileOpenFlags defines Open and Write Flags. Correlate directly with with os.OpenFile flags
// (https://golang.org/pkg/os/#pkg-constants).
type FileOpenFlags struct {
	Read, Write, Append, Creat, Trunc, Excl bool
}

func newFileOpenFlags(flags uint32) FileOpenFlags {
	return FileOpenFlags{
		Read:   flags&sshFxfRead != 0,
		Write:  flags&sshFxfWrite != 0,
		Append: flags&sshFxfAppend != 0,
		Creat:  flags&sshFxfCreat != 0,
		Trunc:  flags&sshFxfTrunc != 0,
		Excl:   flags&sshFxfExcl != 0,
	}
}

// Pflags converts the bitmap/uint32 from SFTP Open packet pflag values,
// into a FileOpenFlags struct with booleans set for flags set in bitmap.
func (r *Request) Pflags() FileOpenFlags {
	return newFileOpenFlags(r.Flags)
}

// FileAttrFlags that indicate whether SFTP file attributes were passed. When a flag is
// true the corresponding attribute should be available from the FileStat
// object returned by Attributes method. Used with SetStat.
type FileAttrFlags struct {
	Size, UidGid, Permissions, Acmodtime bool
}

func newFileAttrFlags(flags uint32) FileAttrFlags {
	return FileAttrFlags{
		Size:        (flags & sshFileXferAttrSize) != 0,
		UidGid:      (flags & sshFileXferAttrUIDGID) != 0,
		Permissions: (flags & sshFileXferAttrPermissions) != 0,
		Acmodtime:   (flags & sshFileXferAttrACmodTime) != 0,
	}
}

// AttrFlags returns a FileAttrFlags boolean struct based on the
// bitmap/uint32 file attribute flags from the SFTP packaet.
func (r *Request) AttrFlags() FileAttrFlags {
	return newFileAttrFlags(r.Flags)
}

// FileMode returns the Mode SFTP file attributes wrapped as os.FileMode
func (a FileStat) FileMode() os.FileMode {
	return os.FileMode(a.Mode)
}

// Attributes parses file attributes byte blob and return them in a
// FileStat object.
func (r *Request) Attributes() *FileStat {
	fs, _ := unmarshalFileStat(r.Flags, r.Attrs)
	return fs
}
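Pflags() and AttrFlags() are how a request-server backend inspects what the client asked for. A sketch of a hypothetical handler using them; backendHandler and its open helper are stand-ins invented for illustration, not part of the commit:

```go
package handlers

import (
	"io"
	"os"

	"github.com/pkg/sftp"
)

// backendHandler and open() are hypothetical stand-ins for a real backend.
type backendHandler struct{}

func (h backendHandler) open(path string, flags sftp.FileOpenFlags) (io.WriterAt, error) {
	return nil, os.ErrInvalid // placeholder
}

// Filewrite shows the Pflags() accessor from the diff above in use.
func (h backendHandler) Filewrite(r *sftp.Request) (io.WriterAt, error) {
	flags := r.Pflags()
	if !flags.Write {
		return nil, os.ErrInvalid
	}
	if flags.Append {
		// e.g. refuse appends; ErrSSHFxOpUnsupported comes from request-errors.go below
		return nil, sftp.ErrSSHFxOpUnsupported
	}
	return h.open(r.Filepath, flags)
}
```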
54
vendor/github.com/pkg/sftp/request-errors.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
package sftp

type fxerr uint32

// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more
// direct control of the errors being sent vs. letting the library work them
// out from the standard os/io errors.
const (
	ErrSSHFxOk               = fxerr(sshFxOk)
	ErrSSHFxEOF              = fxerr(sshFxEOF)
	ErrSSHFxNoSuchFile       = fxerr(sshFxNoSuchFile)
	ErrSSHFxPermissionDenied = fxerr(sshFxPermissionDenied)
	ErrSSHFxFailure          = fxerr(sshFxFailure)
	ErrSSHFxBadMessage       = fxerr(sshFxBadMessage)
	ErrSSHFxNoConnection     = fxerr(sshFxNoConnection)
	ErrSSHFxConnectionLost   = fxerr(sshFxConnectionLost)
	ErrSSHFxOpUnsupported    = fxerr(sshFxOPUnsupported)
)

// Deprecated error types, these are aliases for the new ones, please use the new ones directly
const (
	ErrSshFxOk               = ErrSSHFxOk
	ErrSshFxEof              = ErrSSHFxEOF
	ErrSshFxNoSuchFile       = ErrSSHFxNoSuchFile
	ErrSshFxPermissionDenied = ErrSSHFxPermissionDenied
	ErrSshFxFailure          = ErrSSHFxFailure
	ErrSshFxBadMessage       = ErrSSHFxBadMessage
	ErrSshFxNoConnection     = ErrSSHFxNoConnection
	ErrSshFxConnectionLost   = ErrSSHFxConnectionLost
	ErrSshFxOpUnsupported    = ErrSSHFxOpUnsupported
)

func (e fxerr) Error() string {
	switch e {
	case ErrSSHFxOk:
		return "OK"
	case ErrSSHFxEOF:
		return "EOF"
	case ErrSSHFxNoSuchFile:
		return "no such file"
	case ErrSSHFxPermissionDenied:
		return "permission denied"
	case ErrSSHFxBadMessage:
		return "bad message"
	case ErrSSHFxNoConnection:
		return "no connection"
	case ErrSSHFxConnectionLost:
		return "connection lost"
	case ErrSSHFxOpUnsupported:
		return "operation unsupported"
	default:
		return "failure"
	}
}
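The fxerr constants let a handler send the client a precise SSH_FXP_STATUS code instead of a generic failure. A sketch with a hypothetical handler type; the "/var/backups/" prefix is only an example guard:

```go
package handlers

import (
	"strings"

	"github.com/pkg/sftp"
)

type cleanupHandler struct{}

// Filecmd returns precise SSH_FXP_STATUS codes using the fxerr constants above.
func (cleanupHandler) Filecmd(r *sftp.Request) error {
	if r.Method == "Rmdir" && !strings.HasPrefix(r.Filepath, "/var/backups/") {
		return sftp.ErrSSHFxPermissionDenied
	}
	return sftp.ErrSSHFxOpUnsupported
}
```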
647
vendor/github.com/pkg/sftp/request-example.go
generated
vendored
Normal file
@ -0,0 +1,647 @@
package sftp

// This serves as an example of how to implement the request server handler as
// well as a dummy backend for testing. It implements an in-memory backend that
// works as a very simple filesystem with simple flat key-value lookup system.

import (
	"errors"
	"io"
	"os"
	"path"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"
)

const maxSymlinkFollows = 5

var errTooManySymlinks = errors.New("too many symbolic links")

// InMemHandler returns a Hanlders object with the test handlers.
func InMemHandler() Handlers {
	root := &root{
		rootFile: &memFile{name: "/", modtime: time.Now(), isdir: true},
		files:    make(map[string]*memFile),
	}
	return Handlers{root, root, root, root}
}

// Example Handlers
func (fs *root) Fileread(r *Request) (io.ReaderAt, error) {
	flags := r.Pflags()
	if !flags.Read {
		// sanity check
		return nil, os.ErrInvalid
	}

	return fs.OpenFile(r)
}

func (fs *root) Filewrite(r *Request) (io.WriterAt, error) {
	flags := r.Pflags()
	if !flags.Write {
		// sanity check
		return nil, os.ErrInvalid
	}

	return fs.OpenFile(r)
}

func (fs *root) OpenFile(r *Request) (WriterAtReaderAt, error) {
	if fs.mockErr != nil {
		return nil, fs.mockErr
	}
	_ = r.WithContext(r.Context()) // initialize context for deadlock testing

	fs.mu.Lock()
	defer fs.mu.Unlock()

	return fs.openfile(r.Filepath, r.Flags)
}

func (fs *root) putfile(pathname string, file *memFile) error {
	pathname, err := fs.canonName(pathname)
	if err != nil {
		return err
	}

	if !strings.HasPrefix(pathname, "/") {
		return os.ErrInvalid
	}

	if _, err := fs.lfetch(pathname); err != os.ErrNotExist {
		return os.ErrExist
	}

	file.name = pathname
	fs.files[pathname] = file

	return nil
}

func (fs *root) openfile(pathname string, flags uint32) (*memFile, error) {
	pflags := newFileOpenFlags(flags)

	file, err := fs.fetch(pathname)
	if err == os.ErrNotExist {
		if !pflags.Creat {
			return nil, os.ErrNotExist
		}

		var count int
		// You can create files through dangling symlinks.
		link, err := fs.lfetch(pathname)
		for err == nil && link.symlink != "" {
			if pflags.Excl {
				// unless you also passed in O_EXCL
				return nil, os.ErrInvalid
			}

			if count++; count > maxSymlinkFollows {
				return nil, errTooManySymlinks
			}

			pathname = link.symlink
			link, err = fs.lfetch(pathname)
		}

		file := &memFile{
			modtime: time.Now(),
		}

		if err := fs.putfile(pathname, file); err != nil {
			return nil, err
		}

		return file, nil
	}

	if err != nil {
		return nil, err
	}

	if pflags.Creat && pflags.Excl {
		return nil, os.ErrExist
	}

	if file.IsDir() {
		return nil, os.ErrInvalid
	}

	if pflags.Trunc {
		if err := file.Truncate(0); err != nil {
			return nil, err
		}
	}

	return file, nil
}

func (fs *root) Filecmd(r *Request) error {
	if fs.mockErr != nil {
		return fs.mockErr
	}
	_ = r.WithContext(r.Context()) // initialize context for deadlock testing

	fs.mu.Lock()
	defer fs.mu.Unlock()

	switch r.Method {
	case "Setstat":
		file, err := fs.openfile(r.Filepath, sshFxfWrite)
		if err != nil {
			return err
		}

		if r.AttrFlags().Size {
			return file.Truncate(int64(r.Attributes().Size))
		}

		return nil

	case "Rename":
		// SFTP-v2: "It is an error if there already exists a file with the name specified by newpath."
		// This varies from the POSIX specification, which allows limited replacement of target files.
		if fs.exists(r.Target) {
			return os.ErrExist
		}

		return fs.rename(r.Filepath, r.Target)

	case "Rmdir":
|
||||||
|
return fs.rmdir(r.Filepath)
|
||||||
|
|
||||||
|
case "Remove":
|
||||||
|
// IEEE 1003.1 remove explicitly can unlink files and remove empty directories.
|
||||||
|
// We use instead here the semantics of unlink, which is allowed to be restricted against directories.
|
||||||
|
return fs.unlink(r.Filepath)
|
||||||
|
|
||||||
|
case "Mkdir":
|
||||||
|
return fs.mkdir(r.Filepath)
|
||||||
|
|
||||||
|
case "Link":
|
||||||
|
return fs.link(r.Filepath, r.Target)
|
||||||
|
|
||||||
|
case "Symlink":
|
||||||
|
// NOTE: r.Filepath is the target, and r.Target is the linkpath.
|
||||||
|
return fs.symlink(r.Filepath, r.Target)
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors.New("unsupported")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) rename(oldpath, newpath string) error {
|
||||||
|
file, err := fs.lfetch(oldpath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
newpath, err = fs.canonName(newpath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.HasPrefix(newpath, "/") {
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
target, err := fs.lfetch(newpath)
|
||||||
|
if err != os.ErrNotExist {
|
||||||
|
if target == file {
|
||||||
|
// IEEE 1003.1: if oldpath and newpath are the same directory entry,
|
||||||
|
// then return no error, and perform no further action.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case file.IsDir():
|
||||||
|
// IEEE 1003.1: if oldpath is a directory, and newpath exists,
|
||||||
|
// then newpath must be a directory, and empty.
|
||||||
|
// It is to be removed prior to rename.
|
||||||
|
if err := fs.rmdir(newpath); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
case target.IsDir():
|
||||||
|
// IEEE 1003.1: if oldpath is not a directory, and newpath exists,
|
||||||
|
// then newpath may not be a directory.
|
||||||
|
return syscall.EISDIR
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fs.files[newpath] = file
|
||||||
|
|
||||||
|
if file.IsDir() {
|
||||||
|
dirprefix := file.name + "/"
|
||||||
|
|
||||||
|
for name, file := range fs.files {
|
||||||
|
if strings.HasPrefix(name, dirprefix) {
|
||||||
|
newname := path.Join(newpath, strings.TrimPrefix(name, dirprefix))
|
||||||
|
|
||||||
|
fs.files[newname] = file
|
||||||
|
file.name = newname
|
||||||
|
delete(fs.files, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
file.name = newpath
|
||||||
|
delete(fs.files, oldpath)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) PosixRename(r *Request) error {
|
||||||
|
if fs.mockErr != nil {
|
||||||
|
return fs.mockErr
|
||||||
|
}
|
||||||
|
_ = r.WithContext(r.Context()) // initialize context for deadlock testing
|
||||||
|
|
||||||
|
fs.mu.Lock()
|
||||||
|
defer fs.mu.Unlock()
|
||||||
|
|
||||||
|
return fs.rename(r.Filepath, r.Target)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) StatVFS(r *Request) (*StatVFS, error) {
|
||||||
|
if fs.mockErr != nil {
|
||||||
|
return nil, fs.mockErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return getStatVFSForPath(r.Filepath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) mkdir(pathname string) error {
|
||||||
|
dir := &memFile{
|
||||||
|
modtime: time.Now(),
|
||||||
|
isdir: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.putfile(pathname, dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) rmdir(pathname string) error {
|
||||||
|
// IEEE 1003.1: If pathname is a symlink, then rmdir should fail with ENOTDIR.
|
||||||
|
dir, err := fs.lfetch(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !dir.IsDir() {
|
||||||
|
return syscall.ENOTDIR
|
||||||
|
}
|
||||||
|
|
||||||
|
// use the dir's internal name, not the pathname we passed in.
|
||||||
|
// the dir.name is always the canonical name of a directory.
|
||||||
|
pathname = dir.name
|
||||||
|
|
||||||
|
for name := range fs.files {
|
||||||
|
if path.Dir(name) == pathname {
|
||||||
|
return errors.New("directory not empty")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(fs.files, pathname)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) link(oldpath, newpath string) error {
|
||||||
|
file, err := fs.lfetch(oldpath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if file.IsDir() {
|
||||||
|
return errors.New("hard link not allowed for directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.putfile(newpath, file)
|
||||||
|
}
|
||||||
|
|
||||||
|
// symlink() creates a symbolic link named `linkpath` which contains the string `target`.
|
||||||
|
// NOTE! This would be called with `symlink(req.Filepath, req.Target)` due to different semantics.
|
||||||
|
func (fs *root) symlink(target, linkpath string) error {
|
||||||
|
link := &memFile{
|
||||||
|
modtime: time.Now(),
|
||||||
|
symlink: target,
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.putfile(linkpath, link)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) unlink(pathname string) error {
|
||||||
|
// does not follow symlinks!
|
||||||
|
file, err := fs.lfetch(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if file.IsDir() {
|
||||||
|
// IEEE 1003.1: implementations may opt out of allowing the unlinking of directories.
|
||||||
|
// SFTP-v2: SSH_FXP_REMOVE may not remove directories.
|
||||||
|
return os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
// DO NOT use the file’s internal name.
|
||||||
|
// because of hard-links files cannot have a single canonical name.
|
||||||
|
delete(fs.files, pathname)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type listerat []os.FileInfo
|
||||||
|
|
||||||
|
// Modeled after strings.Reader's ReadAt() implementation
|
||||||
|
func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
|
||||||
|
var n int
|
||||||
|
if offset >= int64(len(f)) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
n = copy(ls, f[offset:])
|
||||||
|
if n < len(ls) {
|
||||||
|
return n, io.EOF
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) Filelist(r *Request) (ListerAt, error) {
|
||||||
|
if fs.mockErr != nil {
|
||||||
|
return nil, fs.mockErr
|
||||||
|
}
|
||||||
|
_ = r.WithContext(r.Context()) // initialize context for deadlock testing
|
||||||
|
|
||||||
|
fs.mu.Lock()
|
||||||
|
defer fs.mu.Unlock()
|
||||||
|
|
||||||
|
switch r.Method {
|
||||||
|
case "List":
|
||||||
|
files, err := fs.readdir(r.Filepath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return listerat(files), nil
|
||||||
|
|
||||||
|
case "Stat":
|
||||||
|
file, err := fs.fetch(r.Filepath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return listerat{file}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New("unsupported")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) readdir(pathname string) ([]os.FileInfo, error) {
|
||||||
|
dir, err := fs.fetch(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !dir.IsDir() {
|
||||||
|
return nil, syscall.ENOTDIR
|
||||||
|
}
|
||||||
|
|
||||||
|
var files []os.FileInfo
|
||||||
|
|
||||||
|
for name, file := range fs.files {
|
||||||
|
if path.Dir(name) == dir.name {
|
||||||
|
files = append(files, file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(files, func(i, j int) bool { return files[i].Name() < files[j].Name() })
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) Readlink(pathname string) (string, error) {
|
||||||
|
file, err := fs.lfetch(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if file.symlink == "" {
|
||||||
|
return "", os.ErrInvalid
|
||||||
|
}
|
||||||
|
|
||||||
|
return file.symlink, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// implements LstatFileLister interface
|
||||||
|
func (fs *root) Lstat(r *Request) (ListerAt, error) {
|
||||||
|
if fs.mockErr != nil {
|
||||||
|
return nil, fs.mockErr
|
||||||
|
}
|
||||||
|
_ = r.WithContext(r.Context()) // initialize context for deadlock testing
|
||||||
|
|
||||||
|
fs.mu.Lock()
|
||||||
|
defer fs.mu.Unlock()
|
||||||
|
|
||||||
|
file, err := fs.lfetch(r.Filepath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return listerat{file}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// In-memory file-system-y thing that the Handlers live on
|
||||||
|
type root struct {
|
||||||
|
rootFile *memFile
|
||||||
|
mockErr error
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
files map[string]*memFile
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a mocked error that the next handler call will return.
|
||||||
|
// Set to nil to reset for no error.
|
||||||
|
func (fs *root) returnErr(err error) {
|
||||||
|
fs.mockErr = err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) lfetch(path string) (*memFile, error) {
|
||||||
|
if path == "/" {
|
||||||
|
return fs.rootFile, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
file, ok := fs.files[path]
|
||||||
|
if file == nil {
|
||||||
|
if ok {
|
||||||
|
delete(fs.files, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
|
||||||
|
return file, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// canonName returns the “canonical” name of a file, that is:
|
||||||
|
// if the directory of the pathname is a symlink, it follows that symlink to the valid directory name.
|
||||||
|
// this is relatively easy, since `dir.name` will be the only valid canonical path for a directory.
|
||||||
|
func (fs *root) canonName(pathname string) (string, error) {
|
||||||
|
dirname, filename := path.Dir(pathname), path.Base(pathname)
|
||||||
|
|
||||||
|
dir, err := fs.fetch(dirname)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !dir.IsDir() {
|
||||||
|
return "", syscall.ENOTDIR
|
||||||
|
}
|
||||||
|
|
||||||
|
return path.Join(dir.name, filename), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) exists(path string) bool {
|
||||||
|
path, err := fs.canonName(path)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = fs.lfetch(path)
|
||||||
|
|
||||||
|
return err != os.ErrNotExist
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *root) fetch(pathname string) (*memFile, error) {
|
||||||
|
file, err := fs.lfetch(pathname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var count int
|
||||||
|
for file.symlink != "" {
|
||||||
|
if count++; count > maxSymlinkFollows {
|
||||||
|
return nil, errTooManySymlinks
|
||||||
|
}
|
||||||
|
|
||||||
|
linkTarget := file.symlink
|
||||||
|
if !path.IsAbs(linkTarget) {
|
||||||
|
linkTarget = path.Join(path.Dir(file.name), linkTarget)
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err = fs.lfetch(linkTarget)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return file, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implements os.FileInfo, io.ReaderAt and io.WriterAt interfaces.
|
||||||
|
// These are the 3 interfaces necessary for the Handlers.
|
||||||
|
// Implements the optional interface TransferError.
|
||||||
|
type memFile struct {
|
||||||
|
name string
|
||||||
|
modtime time.Time
|
||||||
|
symlink string
|
||||||
|
isdir bool
|
||||||
|
|
||||||
|
mu sync.RWMutex
|
||||||
|
content []byte
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// These are helper functions, they must be called while holding the memFile.mu mutex
|
||||||
|
func (f *memFile) size() int64 { return int64(len(f.content)) }
|
||||||
|
func (f *memFile) grow(n int64) { f.content = append(f.content, make([]byte, n)...) }
|
||||||
|
|
||||||
|
// Have memFile fulfill os.FileInfo interface
|
||||||
|
func (f *memFile) Name() string { return path.Base(f.name) }
|
||||||
|
func (f *memFile) Size() int64 {
|
||||||
|
f.mu.Lock()
|
||||||
|
defer f.mu.Unlock()
|
||||||
|
|
||||||
|
return f.size()
|
||||||
|
}
|
||||||
|
func (f *memFile) Mode() os.FileMode {
|
||||||
|
if f.isdir {
|
||||||
|
return os.FileMode(0755) | os.ModeDir
|
||||||
|
}
|
||||||
|
if f.symlink != "" {
|
||||||
|
return os.FileMode(0777) | os.ModeSymlink
|
||||||
|
}
|
||||||
|
return os.FileMode(0644)
|
||||||
|
}
|
||||||
|
func (f *memFile) ModTime() time.Time { return f.modtime }
|
||||||
|
func (f *memFile) IsDir() bool { return f.isdir }
|
||||||
|
func (f *memFile) Sys() interface{} {
|
||||||
|
return fakeFileInfoSys()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *memFile) ReadAt(b []byte, off int64) (int, error) {
|
||||||
|
f.mu.Lock()
|
||||||
|
defer f.mu.Unlock()
|
||||||
|
|
||||||
|
if f.err != nil {
|
||||||
|
return 0, f.err
|
||||||
|
}
|
||||||
|
|
||||||
|
if off < 0 {
|
||||||
|
return 0, errors.New("memFile.ReadAt: negative offset")
|
||||||
|
}
|
||||||
|
|
||||||
|
if off >= f.size() {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
n := copy(b, f.content[off:])
|
||||||
|
if n < len(b) {
|
||||||
|
return n, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *memFile) WriteAt(b []byte, off int64) (int, error) {
|
||||||
|
// fmt.Println(string(p), off)
|
||||||
|
// mimic write delays, should be optional
|
||||||
|
time.Sleep(time.Microsecond * time.Duration(len(b)))
|
||||||
|
|
||||||
|
f.mu.Lock()
|
||||||
|
defer f.mu.Unlock()
|
||||||
|
|
||||||
|
if f.err != nil {
|
||||||
|
return 0, f.err
|
||||||
|
}
|
||||||
|
|
||||||
|
grow := int64(len(b)) + off - f.size()
|
||||||
|
if grow > 0 {
|
||||||
|
f.grow(grow)
|
||||||
|
}
|
||||||
|
|
||||||
|
return copy(f.content[off:], b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *memFile) Truncate(size int64) error {
|
||||||
|
f.mu.Lock()
|
||||||
|
defer f.mu.Unlock()
|
||||||
|
|
||||||
|
if f.err != nil {
|
||||||
|
return f.err
|
||||||
|
}
|
||||||
|
|
||||||
|
grow := size - f.size()
|
||||||
|
if grow <= 0 {
|
||||||
|
f.content = f.content[:size]
|
||||||
|
} else {
|
||||||
|
f.grow(grow)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *memFile) TransferError(err error) {
|
||||||
|
f.mu.Lock()
|
||||||
|
defer f.mu.Unlock()
|
||||||
|
|
||||||
|
f.err = err
|
||||||
|
}
|
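request-example.go above exposes the in-memory backend through InMemHandler(). The sketch below wires it into a RequestServer end to end over net.Pipe so it can be run standalone; it assumes the package's client side (sftp.NewClientPipe and the *sftp.Client methods), which is not part of this diff, and in a real deployment the pipe would instead be an accepted SSH "sftp" subsystem channel.

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/pkg/sftp"
)

func main() {
	// net.Pipe stands in for the SSH channel a real deployment would use.
	clientEnd, serverEnd := net.Pipe()

	// Serve the in-memory example backend defined in request-example.go.
	go func() {
		server := sftp.NewRequestServer(serverEnd, sftp.InMemHandler())
		if err := server.Serve(); err != nil {
			log.Printf("request server exited: %v", err)
		}
	}()

	// Talk to it with the regular sftp client over the same pipe.
	client, err := sftp.NewClientPipe(clientEnd, clientEnd)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	if err := client.Mkdir("/uploads"); err != nil {
		log.Fatal(err)
	}
	f, err := client.Create("/uploads/hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := f.Write([]byte("stored entirely in memory")); err != nil {
		log.Fatal(err)
	}
	f.Close()

	entries, err := client.ReadDir("/uploads")
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.Size())
	}
}
```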
157
vendor/github.com/pkg/sftp/request-interfaces.go
generated
vendored
Normal file
|
@ -0,0 +1,157 @@
|
||||||
|
package sftp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WriterAtReaderAt defines the interface to return when a file is to
|
||||||
|
// be opened for reading and writing
|
||||||
|
type WriterAtReaderAt interface {
|
||||||
|
io.WriterAt
|
||||||
|
io.ReaderAt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interfaces are differentiated based on required returned values.
|
||||||
|
// All input arguments are to be pulled from Request (the only arg).
|
||||||
|
|
||||||
|
// The Handler interfaces all take the Request object as their only argument.
|
||||||
|
// All the data you should need to handle the call are in the Request object.
|
||||||
|
// The request.Method attribute is initially the most important one as it
|
||||||
|
// determines which Handler gets called.
|
||||||
|
|
||||||
|
// FileReader should return an io.ReaderAt for the filepath
|
||||||
|
// Note in cases of an error, the error text will be sent to the client.
|
||||||
|
// Called for Methods: Get
|
||||||
|
type FileReader interface {
|
||||||
|
Fileread(*Request) (io.ReaderAt, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileWriter should return an io.WriterAt for the filepath.
|
||||||
|
//
|
||||||
|
// The request server code will call Close() on the returned io.WriterAt
|
||||||
|
// object if an io.Closer type assertion succeeds.
|
||||||
|
// Note in cases of an error, the error text will be sent to the client.
|
||||||
|
// Note when receiving an Append flag it is important to not open files using
|
||||||
|
// O_APPEND if you plan to use WriteAt, as they conflict.
|
||||||
|
// Called for Methods: Put, Open
|
||||||
|
type FileWriter interface {
|
||||||
|
Filewrite(*Request) (io.WriterAt, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenFileWriter is a FileWriter that implements the generic OpenFile method.
|
||||||
|
// You need to implement this optional interface if you want to be able
|
||||||
|
// to read and write from/to the same handle.
|
||||||
|
// Called for Methods: Open
|
||||||
|
type OpenFileWriter interface {
|
||||||
|
FileWriter
|
||||||
|
OpenFile(*Request) (WriterAtReaderAt, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileCmder should return an error
|
||||||
|
// Note in cases of an error, the error text will be sent to the client.
|
||||||
|
// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Link, Symlink, Remove
|
||||||
|
type FileCmder interface {
|
||||||
|
Filecmd(*Request) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// PosixRenameFileCmder is a FileCmder that implements the PosixRename method.
|
||||||
|
// If this interface is implemented PosixRename requests will call it
|
||||||
|
// otherwise they will be handled in the same way as Rename
|
||||||
|
type PosixRenameFileCmder interface {
|
||||||
|
FileCmder
|
||||||
|
PosixRename(*Request) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatVFSFileCmder is a FileCmder that implements the StatVFS method.
|
||||||
|
// You need to implement this interface if you want to handle statvfs requests.
|
||||||
|
// Please also be sure that the statvfs@openssh.com extension is enabled
|
||||||
|
type StatVFSFileCmder interface {
|
||||||
|
FileCmder
|
||||||
|
StatVFS(*Request) (*StatVFS, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileLister should return an object that fulfils the ListerAt interface
|
||||||
|
// Note in cases of an error, the error text will be sent to the client.
|
||||||
|
// Called for Methods: List, Stat, Readlink
|
||||||
|
//
|
||||||
|
// Since Filelist returns an os.FileInfo, this can make it non-ideal for implementing Readlink.
|
||||||
|
// This is because the Name method defined by that interface is specified to return only the base name.
|
||||||
|
// However, Readlink is required to be capable of returning essentially any arbitrary valid path relative or absolute.
|
||||||
|
// In order to implement this more expressive requirement, implement [ReadlinkFileLister] which will then be used instead.
|
||||||
|
type FileLister interface {
|
||||||
|
Filelist(*Request) (ListerAt, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LstatFileLister is a FileLister that implements the Lstat method.
|
||||||
|
// If this interface is implemented Lstat requests will call it
|
||||||
|
// otherwise they will be handled in the same way as Stat
|
||||||
|
type LstatFileLister interface {
|
||||||
|
FileLister
|
||||||
|
Lstat(*Request) (ListerAt, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RealPathFileLister is a FileLister that implements the Realpath method.
|
||||||
|
// The built-in RealPath implementation does not resolve symbolic links.
|
||||||
|
// By implementing this interface you can customize the returned path
|
||||||
|
// and, for example, resolve symbolic links if needed for your use case.
|
||||||
|
// You have to return an absolute POSIX path.
|
||||||
|
//
|
||||||
|
// Up to v1.13.5 the signature for the RealPath method was:
|
||||||
|
//
|
||||||
|
// # RealPath(string) string
|
||||||
|
//
|
||||||
|
// we have added a legacyRealPathFileLister that implements the old method
|
||||||
|
// to ensure that your code does not break.
|
||||||
|
// You should use the new method signature to avoid future issues
|
||||||
|
type RealPathFileLister interface {
|
||||||
|
FileLister
|
||||||
|
RealPath(string) (string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadlinkFileLister is a FileLister that implements the Readlink method.
|
||||||
|
// By implementing the Readlink method, it is possible to return any arbitrary valid path relative or absolute.
|
||||||
|
// This allows giving a better response than via the default FileLister (which is limited to os.FileInfo, whose Name method should only return the base name of a file)
|
||||||
|
type ReadlinkFileLister interface {
|
||||||
|
FileLister
|
||||||
|
Readlink(string) (string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This interface is here for backward compatibility only
|
||||||
|
type legacyRealPathFileLister interface {
|
||||||
|
FileLister
|
||||||
|
RealPath(string) string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameLookupFileLister is a FileLister that implements the LookupUserName and LookupGroupName methods.
|
||||||
|
// If this interface is implemented, then longname ls formatting will use these to convert usernames and groupnames.
|
||||||
|
type NameLookupFileLister interface {
|
||||||
|
FileLister
|
||||||
|
LookupUserName(string) string
|
||||||
|
LookupGroupName(string) string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListerAt does for file lists what io.ReaderAt does for files, i.e. a []os.FileInfo buffer is passed to the ListAt function
|
||||||
|
// and the entries that are populated in the buffer will be passed to the client.
|
||||||
|
//
|
||||||
|
// ListAt should return the number of entries copied and an io.EOF error if at end of list.
|
||||||
|
// This is testable by comparing how many you copied to how many could be copied (eg. n < len(ls) below).
|
||||||
|
// The copy() builtin is best for the copying.
|
||||||
|
//
|
||||||
|
// Uid and gid information will on unix systems be retrieved from [os.FileInfo.Sys]
|
||||||
|
// if this function returns a [syscall.Stat_t] when called on a populated entry.
|
||||||
|
// Alternatively, if the entry implements [FileInfoUidGid], it will be used for uid and gid information.
|
||||||
|
//
|
||||||
|
// If a populated entry implements [FileInfoExtendedData], extended attributes will also be returned to the client.
|
||||||
|
//
|
||||||
|
// Note in cases of an error, the error text will be sent to the client.
|
||||||
|
type ListerAt interface {
|
||||||
|
ListAt([]os.FileInfo, int64) (int, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TransferError is an optional interface that readerAt and writerAt
|
||||||
|
// can implement to be notified about the error causing Serve() to exit
|
||||||
|
// with the request still open
|
||||||
|
type TransferError interface {
|
||||||
|
TransferError(err error)
|
||||||
|
}
|
16
vendor/github.com/pkg/sftp/request-plan9.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
//go:build plan9
|
||||||
|
// +build plan9
|
||||||
|
|
||||||
|
package sftp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func fakeFileInfoSys() interface{} {
|
||||||
|
return &syscall.Dir{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOsSys(sys interface{}) error {
|
||||||
|
return nil
|
||||||
|
}
|
53
vendor/github.com/pkg/sftp/request-readme.md
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
# Request Based SFTP API
|
||||||
|
|
||||||
|
The request based API allows for custom backends in a way similar to the http
|
||||||
|
package. In order to create a backend you need to implement 4 handler
|
||||||
|
interfaces; one for reading, one for writing, one for misc commands and one for
|
||||||
|
listing files. Each has one required method; in each case the method takes
|
||||||
|
the Request as its only parameter, and each returns something different.
|
||||||
|
These 4 interfaces are enough to handle all the SFTP traffic in a simplified
|
||||||
|
manner.
|
||||||
|
|
||||||
|
The Request structure has 5 public fields which you will deal with.
|
||||||
|
|
||||||
|
- Method (string) - string name of incoming call
|
||||||
|
- Filepath (string) - POSIX path of file to act on
|
||||||
|
- Flags (uint32) - 32bit bitmask value of file open/create flags
|
||||||
|
- Attrs ([]byte) - byte string of file attribute data
|
||||||
|
- Target (string) - target path for renames and sym-links
|
||||||
|
|
||||||
|
Below are the methods and a brief description of what they need to do.
|
||||||
|
|
||||||
|
### Fileread(*Request) (io.Reader, error)
|
||||||
|
|
||||||
|
Handler for "Get" method and returns an io.Reader for the file which the server
|
||||||
|
then sends to the client.
|
||||||
|
|
||||||
|
### Filewrite(*Request) (io.Writer, error)
|
||||||
|
|
||||||
|
Handler for "Put" method and returns an io.Writer for the file which the server
|
||||||
|
then writes the uploaded file to. The file opening "pflags" are currently
|
||||||
|
preserved in the Request.Flags field as a 32bit bitmask value. See the [SFTP
|
||||||
|
spec](https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-6.3) for
|
||||||
|
details.
|
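A hedged sketch of decoding that bitmask: the helper below uses the Pflags() accessor that the in-memory example backend also relies on, and maps the decoded flags onto os.OpenFile constants. The helper and package names are illustrative only.

```go
package flagsdemo

import (
	"os"

	"github.com/pkg/sftp"
)

// openModeFromRequest is a hypothetical helper that turns the pflags bitmask
// carried in Request.Flags into os.OpenFile flags.
func openModeFromRequest(r *sftp.Request) (int, error) {
	pf := r.Pflags()

	var mode int
	switch {
	case pf.Read && pf.Write:
		mode = os.O_RDWR
	case pf.Write:
		mode = os.O_WRONLY
	case pf.Read:
		mode = os.O_RDONLY
	default:
		return 0, os.ErrInvalid
	}
	if pf.Creat {
		mode |= os.O_CREATE
	}
	if pf.Excl {
		mode |= os.O_EXCL
	}
	if pf.Trunc {
		mode |= os.O_TRUNC
	}
	// Deliberately no os.O_APPEND: the FileWriter docs in
	// request-interfaces.go warn that O_APPEND conflicts with WriteAt.
	return mode, nil
}
```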
||||||
|
|
||||||
|
### Filecmd(*Request) error
|
||||||
|
|
||||||
|
Handles "SetStat", "Rename", "Rmdir", "Mkdir" and "Symlink" methods. Makes the
|
||||||
|
appropriate changes and returns nil for success or a filesystem-like error
|
||||||
|
(eg. os.ErrNotExist). The attributes are currently propagated in their raw form
|
||||||
|
([]byte) and will need to be unmarshalled to be useful. See the respond method
|
||||||
|
on sshFxpSetstatPacket for an example of how you might want to do this.
|
||||||
|
|
||||||
|
### Fileinfo(*Request) ([]os.FileInfo, error)
|
||||||
|
|
||||||
|
Handles "List", "Stat", "Readlink" methods. Gathers/creates FileInfo structs
|
||||||
|
with the data on the files and returns them in a list (a list of 1 for Stat and
|
||||||
|
Readlink).
|
||||||
|
|
||||||
|
|
||||||
|
## TODO
|
||||||
|
|
||||||
|
- Add support for API users to see trace/debugging info of what is going on
|
||||||
|
inside the SFTP server.
|
||||||
|
- Unmarshal the file attributes into a structure on the Request object.
|
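Note that this README appears to describe an older shape of the API; the interface definitions in request-interfaces.go earlier in this diff use Fileread returning io.ReaderAt, Filewrite returning io.WriterAt, and Filelist returning a ListerAt. The sketch below, with hypothetical names, shows one type implementing those four interfaces against a local directory and assembling them into the Handlers struct that NewRequestServer expects; Filelist is left as a stub here.

```go
package diskbackend

import (
	"io"
	"os"
	"path/filepath"

	"github.com/pkg/sftp"
)

// diskBackend is a hypothetical backend that maps every request path onto a
// directory on the local disk, implementing the handler interfaces from
// request-interfaces.go on a single type.
type diskBackend struct {
	root string // local directory standing in for "/"
}

func (b *diskBackend) local(p string) string {
	return filepath.Join(b.root, filepath.FromSlash(p))
}

// Fileread handles "Get"; *os.File satisfies io.ReaderAt.
func (b *diskBackend) Fileread(r *sftp.Request) (io.ReaderAt, error) {
	return os.Open(b.local(r.Filepath))
}

// Filewrite handles "Put"; *os.File satisfies io.WriterAt.
// No os.O_APPEND here, since it conflicts with WriteAt.
func (b *diskBackend) Filewrite(r *sftp.Request) (io.WriterAt, error) {
	return os.OpenFile(b.local(r.Filepath), os.O_RDWR|os.O_CREATE, 0o644)
}

// Filecmd handles the modifying methods by switching on Request.Method.
func (b *diskBackend) Filecmd(r *sftp.Request) error {
	switch r.Method {
	case "Mkdir":
		return os.Mkdir(b.local(r.Filepath), 0o755)
	case "Remove", "Rmdir":
		return os.Remove(b.local(r.Filepath))
	case "Rename":
		return os.Rename(b.local(r.Filepath), b.local(r.Target))
	default:
		return sftp.ErrSSHFxOpUnsupported
	}
}

// Filelist is a stub in this sketch; a real backend would return a ListerAt
// like the one sketched after request-interfaces.go above.
func (b *diskBackend) Filelist(r *sftp.Request) (sftp.ListerAt, error) {
	return nil, sftp.ErrSSHFxOpUnsupported
}

// Handlers assembles the four interfaces into the struct NewRequestServer expects.
func (b *diskBackend) Handlers() sftp.Handlers {
	return sftp.Handlers{FileGet: b, FilePut: b, FileCmd: b, FileList: b}
}
```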
337
vendor/github.com/pkg/sftp/request-server.go
generated
vendored
Normal file
|
@ -0,0 +1,337 @@
|
||||||
|
package sftp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var maxTxPacket uint32 = 1 << 15
|
||||||
|
|
||||||
|
// Handlers contains the 4 SFTP server request handlers.
|
||||||
|
type Handlers struct {
|
||||||
|
FileGet FileReader
|
||||||
|
FilePut FileWriter
|
||||||
|
FileCmd FileCmder
|
||||||
|
FileList FileLister
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestServer abstracts the sftp protocol with an http request-like protocol
|
||||||
|
type RequestServer struct {
|
||||||
|
Handlers Handlers
|
||||||
|
|
||||||
|
*serverConn
|
||||||
|
pktMgr *packetManager
|
||||||
|
|
||||||
|
startDirectory string
|
||||||
|
|
||||||
|
mu sync.RWMutex
|
||||||
|
handleCount int
|
||||||
|
openRequests map[string]*Request
|
||||||
|
}
|
||||||
|
|
||||||
|
// A RequestServerOption is a function which applies configuration to a RequestServer.
|
||||||
|
type RequestServerOption func(*RequestServer)
|
||||||
|
|
||||||
|
// WithRSAllocator enables the allocator.
|
||||||
|
// After processing a packet we keep in memory the allocated slices
|
||||||
|
// and we reuse them for new packets.
|
||||||
|
// The allocator is experimental
|
||||||
|
func WithRSAllocator() RequestServerOption {
|
||||||
|
return func(rs *RequestServer) {
|
||||||
|
alloc := newAllocator()
|
||||||
|
rs.pktMgr.alloc = alloc
|
||||||
|
rs.conn.alloc = alloc
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithStartDirectory sets a start directory to use as base for relative paths.
|
||||||
|
// If unset, the default is "/".
|
||||||
|
func WithStartDirectory(startDirectory string) RequestServerOption {
|
||||||
|
return func(rs *RequestServer) {
|
||||||
|
rs.startDirectory = cleanPath(startDirectory)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRequestServer creates/allocates/returns a new RequestServer.
|
||||||
|
// Normally there will be one server per user-session.
|
||||||
|
func NewRequestServer(rwc io.ReadWriteCloser, h Handlers, options ...RequestServerOption) *RequestServer {
|
||||||
|
svrConn := &serverConn{
|
||||||
|
conn: conn{
|
||||||
|
Reader: rwc,
|
||||||
|
WriteCloser: rwc,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
rs := &RequestServer{
|
||||||
|
Handlers: h,
|
||||||
|
|
||||||
|
serverConn: svrConn,
|
||||||
|
pktMgr: newPktMgr(svrConn),
|
||||||
|
|
||||||
|
startDirectory: "/",
|
||||||
|
|
||||||
|
openRequests: make(map[string]*Request),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, o := range options {
|
||||||
|
o(rs)
|
||||||
|
}
|
||||||
|
return rs
|
||||||
|
}
|
||||||
|
|
||||||
|
// New Open packet/Request
|
||||||
|
func (rs *RequestServer) nextRequest(r *Request) string {
|
||||||
|
rs.mu.Lock()
|
||||||
|
defer rs.mu.Unlock()
|
||||||
|
|
||||||
|
rs.handleCount++
|
||||||
|
|
||||||
|
r.handle = strconv.Itoa(rs.handleCount)
|
||||||
|
rs.openRequests[r.handle] = r
|
||||||
|
|
||||||
|
return r.handle
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns Request from openRequests, bool is false if it is missing.
|
||||||
|
//
|
||||||
|
// The Requests in openRequests work essentially as open file descriptors that
|
||||||
|
// you can do different things with. What you are doing with it are denoted by
|
||||||
|
// the first packet of that type (read/write/etc).
|
||||||
|
func (rs *RequestServer) getRequest(handle string) (*Request, bool) {
|
||||||
|
rs.mu.RLock()
|
||||||
|
defer rs.mu.RUnlock()
|
||||||
|
|
||||||
|
r, ok := rs.openRequests[handle]
|
||||||
|
return r, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the Request and clear from openRequests map
|
||||||
|
func (rs *RequestServer) closeRequest(handle string) error {
|
||||||
|
rs.mu.Lock()
|
||||||
|
defer rs.mu.Unlock()
|
||||||
|
|
||||||
|
if r, ok := rs.openRequests[handle]; ok {
|
||||||
|
delete(rs.openRequests, handle)
|
||||||
|
return r.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
return EBADF
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close the read/write/closer to trigger exiting the main server loop
|
||||||
|
func (rs *RequestServer) Close() error { return rs.conn.Close() }
|
||||||
|
|
||||||
|
func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error {
|
||||||
|
defer close(pktChan) // shuts down sftpServerWorkers
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var pkt requestPacket
|
||||||
|
var pktType uint8
|
||||||
|
var pktBytes []byte
|
||||||
|
|
||||||
|
for {
|
||||||
|
pktType, pktBytes, err = rs.serverConn.recvPacket(rs.pktMgr.getNextOrderID())
|
||||||
|
if err != nil {
|
||||||
|
// we don't care about releasing allocated pages here, the server will quit and the allocator freed
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes})
|
||||||
|
if err != nil {
|
||||||
|
switch {
|
||||||
|
case errors.Is(err, errUnknownExtendedPacket):
|
||||||
|
// do nothing
|
||||||
|
default:
|
||||||
|
debug("makePacket err: %v", err)
|
||||||
|
rs.conn.Close() // shuts down recvPacket
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pktChan <- rs.pktMgr.newOrderedRequest(pkt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve requests for user session
|
||||||
|
func (rs *RequestServer) Serve() error {
|
||||||
|
defer func() {
|
||||||
|
if rs.pktMgr.alloc != nil {
|
||||||
|
rs.pktMgr.alloc.Free()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
runWorker := func(ch chan orderedRequest) {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := rs.packetWorker(ctx, ch); err != nil {
|
||||||
|
rs.conn.Close() // shuts down recvPacket
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
pktChan := rs.pktMgr.workerChan(runWorker)
|
||||||
|
|
||||||
|
err := rs.serveLoop(pktChan)
|
||||||
|
|
||||||
|
wg.Wait() // wait for all workers to exit
|
||||||
|
|
||||||
|
rs.mu.Lock()
|
||||||
|
defer rs.mu.Unlock()
|
||||||
|
|
||||||
|
// make sure all open requests are properly closed
|
||||||
|
// (eg. possible on dropped connections, client crashes, etc.)
|
||||||
|
for handle, req := range rs.openRequests {
|
||||||
|
if err == io.EOF {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
req.transferError(err)
|
||||||
|
|
||||||
|
delete(rs.openRequests, handle)
|
||||||
|
req.close()
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedRequest) error {
|
||||||
|
for pkt := range pktChan {
|
||||||
|
orderID := pkt.orderID()
|
||||||
|
if epkt, ok := pkt.requestPacket.(*sshFxpExtendedPacket); ok {
|
||||||
|
if epkt.SpecificPacket != nil {
|
||||||
|
pkt.requestPacket = epkt.SpecificPacket
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var rpkt responsePacket
|
||||||
|
switch pkt := pkt.requestPacket.(type) {
|
||||||
|
case *sshFxInitPacket:
|
||||||
|
rpkt = &sshFxVersionPacket{Version: sftpProtocolVersion, Extensions: sftpExtensions}
|
||||||
|
case *sshFxpClosePacket:
|
||||||
|
handle := pkt.getHandle()
|
||||||
|
rpkt = statusFromError(pkt.ID, rs.closeRequest(handle))
|
||||||
|
case *sshFxpRealpathPacket:
|
||||||
|
var realPath string
|
||||||
|
var err error
|
||||||
|
|
||||||
|
switch pather := rs.Handlers.FileList.(type) {
|
||||||
|
case RealPathFileLister:
|
||||||
|
realPath, err = pather.RealPath(pkt.getPath())
|
||||||
|
case legacyRealPathFileLister:
|
||||||
|
realPath = pather.RealPath(pkt.getPath())
|
||||||
|
default:
|
||||||
|
realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath())
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
rpkt = statusFromError(pkt.ID, err)
|
||||||
|
} else {
|
||||||
|
rpkt = cleanPacketPath(pkt, realPath)
|
||||||
|
}
|
||||||
|
case *sshFxpOpendirPacket:
|
||||||
|
request := requestFromPacket(ctx, pkt, rs.startDirectory)
|
||||||
|
handle := rs.nextRequest(request)
|
||||||
|
rpkt = request.opendir(rs.Handlers, pkt)
|
||||||
|
if _, ok := rpkt.(*sshFxpHandlePacket); !ok {
|
||||||
|
// if we return an error we have to remove the handle from the active ones
|
||||||
|
rs.closeRequest(handle)
|
||||||
|
}
|
||||||
|
case *sshFxpOpenPacket:
|
||||||
|
request := requestFromPacket(ctx, pkt, rs.startDirectory)
|
||||||
|
handle := rs.nextRequest(request)
|
||||||
|
rpkt = request.open(rs.Handlers, pkt)
|
||||||
|
if _, ok := rpkt.(*sshFxpHandlePacket); !ok {
|
||||||
|
// if we return an error we have to remove the handle from the active ones
|
||||||
|
rs.closeRequest(handle)
|
||||||
|
}
|
||||||
|
case *sshFxpFstatPacket:
|
||||||
|
handle := pkt.getHandle()
|
||||||
|
request, ok := rs.getRequest(handle)
|
||||||
|
if !ok {
|
||||||
|
rpkt = statusFromError(pkt.ID, EBADF)
|
||||||
|
} else {
|
||||||
|
request = &Request{
|
||||||
|
Method: "Stat",
|
||||||
|
Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath),
|
||||||
|
}
|
||||||
|
rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
|
||||||
|
}
|
||||||
|
case *sshFxpFsetstatPacket:
|
||||||
|
handle := pkt.getHandle()
|
||||||
|
request, ok := rs.getRequest(handle)
|
||||||
|
if !ok {
|
||||||
|
rpkt = statusFromError(pkt.ID, EBADF)
|
||||||
|
} else {
|
||||||
|
request = &Request{
|
||||||
|
Method: "Setstat",
|
||||||
|
Filepath: cleanPathWithBase(rs.startDirectory, request.Filepath),
|
||||||
|
}
|
||||||
|
rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
|
||||||
|
}
|
||||||
|
case *sshFxpExtendedPacketPosixRename:
|
||||||
|
request := &Request{
|
||||||
|
Method: "PosixRename",
|
||||||
|
Filepath: cleanPathWithBase(rs.startDirectory, pkt.Oldpath),
|
||||||
|
Target: cleanPathWithBase(rs.startDirectory, pkt.Newpath),
|
||||||
|
}
|
||||||
|
rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
|
||||||
|
case *sshFxpExtendedPacketStatVFS:
|
||||||
|
request := &Request{
|
||||||
|
Method: "StatVFS",
|
||||||
|
Filepath: cleanPathWithBase(rs.startDirectory, pkt.Path),
|
||||||
|
}
|
||||||
|
rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
|
||||||
|
case hasHandle:
|
||||||
|
handle := pkt.getHandle()
|
||||||
|
request, ok := rs.getRequest(handle)
|
||||||
|
if !ok {
|
||||||
|
rpkt = statusFromError(pkt.id(), EBADF)
|
||||||
|
} else {
|
||||||
|
rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
|
||||||
|
}
|
||||||
|
case hasPath:
|
||||||
|
request := requestFromPacket(ctx, pkt, rs.startDirectory)
|
||||||
|
rpkt = request.call(rs.Handlers, pkt, rs.pktMgr.alloc, orderID)
|
||||||
|
request.close()
|
||||||
|
default:
|
||||||
|
rpkt = statusFromError(pkt.id(), ErrSSHFxOpUnsupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
rs.pktMgr.readyPacket(
|
||||||
|
rs.pktMgr.newOrderedResponse(rpkt, orderID))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// clean and return name packet for file
|
||||||
|
func cleanPacketPath(pkt *sshFxpRealpathPacket, realPath string) responsePacket {
|
||||||
|
return &sshFxpNamePacket{
|
||||||
|
ID: pkt.id(),
|
||||||
|
NameAttrs: []*sshFxpNameAttr{
|
||||||
|
{
|
||||||
|
Name: realPath,
|
||||||
|
LongName: realPath,
|
||||||
|
Attrs: emptyFileStat,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Makes sure we have a clean POSIX (/) absolute path to work with
|
||||||
|
func cleanPath(p string) string {
|
||||||
|
return cleanPathWithBase("/", p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanPathWithBase(base, p string) string {
|
||||||
|
p = filepath.ToSlash(filepath.Clean(p))
|
||||||
|
if !path.IsAbs(p) {
|
||||||
|
return path.Join(base, p)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
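The two RequestServerOption values defined above can be combined when constructing a server for a session. A small sketch follows; the function name, package, and the idea of rooting each session in a per-user home directory are assumptions, not part of this diff.

```go
package serveropts

import (
	"io"

	"github.com/pkg/sftp"
)

// newSessionServer builds a RequestServer for one user session, rooting all
// relative client paths under the user's home directory and enabling the
// (experimental) packet allocator. The channel and handlers are assumed to
// be supplied by the caller, e.g. from an accepted SSH "sftp" subsystem.
func newSessionServer(channel io.ReadWriteCloser, h sftp.Handlers, home string) *sftp.RequestServer {
	return sftp.NewRequestServer(
		channel,
		h,
		sftp.WithStartDirectory(home), // base for relative paths, default "/"
		sftp.WithRSAllocator(),        // reuse packet buffers between requests
	)
}
```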
24
vendor/github.com/pkg/sftp/request-unix.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
//go:build !windows && !plan9
|
||||||
|
// +build !windows,!plan9
|
||||||
|
|
||||||
|
package sftp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func fakeFileInfoSys() interface{} {
|
||||||
|
return &syscall.Stat_t{Uid: 65534, Gid: 65534}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOsSys(sys interface{}) error {
|
||||||
|
fstat := sys.(*FileStat)
|
||||||
|
if fstat.UID != uint32(65534) {
|
||||||
|
return errors.New("Uid failed to match")
|
||||||
|
}
|
||||||
|
if fstat.GID != uint32(65534) {
|
||||||
|
return errors.New("Gid failed to match")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
653
vendor/github.com/pkg/sftp/request.go
generated
vendored
Normal file
|
@ -0,0 +1,653 @@
|
||||||
|
package sftp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MaxFilelist is the max number of files to return in a readdir batch.
|
||||||
|
var MaxFilelist int64 = 100
|
||||||
|
|
||||||
|
// state encapsulates the reader/writer/readdir from handlers.
|
||||||
|
type state struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
|
||||||
|
writerAt io.WriterAt
|
||||||
|
readerAt io.ReaderAt
|
||||||
|
writerAtReaderAt WriterAtReaderAt
|
||||||
|
listerAt ListerAt
|
||||||
|
lsoffset int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy returns a shallow copy of the state.
|
||||||
|
// This is broken out to specific fields,
|
||||||
|
// because we have to copy around the mutex in state.
|
||||||
|
func (s *state) copy() state {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
return state{
|
||||||
|
writerAt: s.writerAt,
|
||||||
|
readerAt: s.readerAt,
|
||||||
|
writerAtReaderAt: s.writerAtReaderAt,
|
||||||
|
listerAt: s.listerAt,
|
||||||
|
lsoffset: s.lsoffset,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) setReaderAt(rd io.ReaderAt) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
s.readerAt = rd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) getReaderAt() io.ReaderAt {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
return s.readerAt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) setWriterAt(rd io.WriterAt) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
s.writerAt = rd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) getWriterAt() io.WriterAt {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
return s.writerAt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) setWriterAtReaderAt(rw WriterAtReaderAt) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
s.writerAtReaderAt = rw
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) getWriterAtReaderAt() WriterAtReaderAt {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
return s.writerAtReaderAt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) getAllReaderWriters() (io.ReaderAt, io.WriterAt, WriterAtReaderAt) {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
return s.readerAt, s.writerAt, s.writerAtReaderAt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns current offset for file list
|
||||||
|
func (s *state) lsNext() int64 {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
return s.lsoffset
|
||||||
|
}
|
||||||
|
|
||||||
|
// Increases next offset
|
||||||
|
func (s *state) lsInc(offset int64) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
s.lsoffset += offset
|
||||||
|
}
|
||||||
|
|
||||||
|
// manage file read/write state
|
||||||
|
func (s *state) setListerAt(la ListerAt) {
|
||||||
|
s.mu.Lock()
|
||||||
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
s.listerAt = la
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *state) getListerAt() ListerAt {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
|
||||||
|
return s.listerAt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request contains the data and state for the incoming service request.
|
||||||
|
type Request struct {
|
||||||
|
// Get, Put, Setstat, Stat, Rename, Remove
|
||||||
|
// Rmdir, Mkdir, List, Readlink, Link, Symlink
|
||||||
|
Method string
|
||||||
|
Filepath string
|
||||||
|
Flags uint32
|
||||||
|
Attrs []byte // convert to sub-struct
|
||||||
|
Target string // for renames and sym-links
|
||||||
|
handle string
|
||||||
|
|
||||||
|
// reader/writer/readdir from handlers
|
||||||
|
state
|
||||||
|
|
||||||
|
// context lasts duration of request
|
||||||
|
ctx context.Context
|
||||||
|
cancelCtx context.CancelFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRequest creates a new Request object.
|
||||||
|
func NewRequest(method, path string) *Request {
|
||||||
|
return &Request{
|
||||||
|
Method: method,
|
||||||
|
Filepath: cleanPath(path),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy returns a shallow copy of the existing request.
|
||||||
|
// This is broken out to specific fields,
|
||||||
|
// because we have to copy around the mutex in state.
|
||||||
|
func (r *Request) copy() *Request {
|
||||||
|
return &Request{
|
||||||
|
Method: r.Method,
|
||||||
|
Filepath: r.Filepath,
|
||||||
|
Flags: r.Flags,
|
||||||
|
Attrs: r.Attrs,
|
||||||
|
Target: r.Target,
|
||||||
|
handle: r.handle,
|
||||||
|
|
||||||
|
state: r.state.copy(),
|
||||||
|
|
||||||
|
ctx: r.ctx,
|
||||||
|
cancelCtx: r.cancelCtx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New Request initialized based on packet data
|
||||||
|
func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Request {
|
||||||
|
request := &Request{
|
||||||
|
Method: requestMethod(pkt),
|
||||||
|
Filepath: cleanPathWithBase(baseDir, pkt.getPath()),
|
||||||
|
}
|
||||||
|
request.ctx, request.cancelCtx = context.WithCancel(ctx)
|
||||||
|
|
||||||
|
switch p := pkt.(type) {
|
||||||
|
case *sshFxpOpenPacket:
|
||||||
|
request.Flags = p.Pflags
|
||||||
|
case *sshFxpSetstatPacket:
|
||||||
|
request.Flags = p.Flags
|
||||||
|
request.Attrs = p.Attrs.([]byte)
|
||||||
|
case *sshFxpRenamePacket:
|
||||||
|
request.Target = cleanPathWithBase(baseDir, p.Newpath)
|
||||||
|
case *sshFxpSymlinkPacket:
|
||||||
|
// NOTE: given a POSIX compliant signature: symlink(target, linkpath string)
|
||||||
|
// this makes Request.Target the linkpath, and Request.Filepath the target.
|
||||||
|
request.Target = cleanPathWithBase(baseDir, p.Linkpath)
|
||||||
|
request.Filepath = p.Targetpath
|
||||||
|
case *sshFxpExtendedPacketHardlink:
|
||||||
|
request.Target = cleanPathWithBase(baseDir, p.Newpath)
|
||||||
|
}
|
||||||
|
return request
|
||||||
|
}
|
||||||
|
|
||||||
|
// Context returns the request's context. To change the context,
|
||||||
|
// use WithContext.
|
||||||
|
//
|
||||||
|
// The returned context is always non-nil; it defaults to the
|
||||||
|
// background context.
|
||||||
|
//
|
||||||
|
// For incoming server requests, the context is canceled when the
|
||||||
|
// request is complete or the client's connection closes.
|
||||||
|
func (r *Request) Context() context.Context {
|
||||||
|
if r.ctx != nil {
|
||||||
|
return r.ctx
|
||||||
|
}
|
||||||
|
return context.Background()
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithContext returns a copy of r with its context changed to ctx.
|
||||||
|
// The provided ctx must be non-nil.
|
||||||
|
func (r *Request) WithContext(ctx context.Context) *Request {
|
||||||
|
if ctx == nil {
|
||||||
|
panic("nil context")
|
||||||
|
}
|
||||||
|
r2 := r.copy()
|
||||||
|
r2.ctx = ctx
|
||||||
|
r2.cancelCtx = nil
|
||||||
|
return r2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close reader/writer if possible
|
||||||
|
func (r *Request) close() error {
|
||||||
|
defer func() {
|
||||||
|
if r.cancelCtx != nil {
|
||||||
|
r.cancelCtx()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
rd, wr, rw := r.getAllReaderWriters()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Close errors on a Writer are far more likely to be the important one.
|
||||||
|
// As they can indicate that there was a loss of data.
|
||||||
|
if c, ok := wr.(io.Closer); ok {
|
||||||
|
if err2 := c.Close(); err == nil {
|
||||||
|
// update error if it is still nil
|
||||||
|
err = err2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c, ok := rw.(io.Closer); ok {
|
||||||
|
if err2 := c.Close(); err == nil {
|
||||||
|
// update error if it is still nil
|
||||||
|
err = err2
|
||||||
|
|
||||||
|
r.setWriterAtReaderAt(nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if c, ok := rd.(io.Closer); ok {
|
||||||
|
if err2 := c.Close(); err == nil {
|
||||||
|
// update error if it is still nil
|
||||||
|
err = err2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notify transfer error if any
|
||||||
|
func (r *Request) transferError(err error) {
|
||||||
|
if err == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rd, wr, rw := r.getAllReaderWriters()
|
||||||
|
|
||||||
|
if t, ok := wr.(TransferError); ok {
|
||||||
|
t.TransferError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t, ok := rw.(TransferError); ok {
|
||||||
|
t.TransferError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t, ok := rd.(TransferError); ok {
|
||||||
|
t.TransferError(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// called from worker to handle packet/request
|
||||||
|
func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
|
||||||
|
switch r.Method {
|
||||||
|
case "Get":
|
||||||
|
return fileget(handlers.FileGet, r, pkt, alloc, orderID)
|
||||||
|
case "Put":
|
||||||
|
return fileput(handlers.FilePut, r, pkt, alloc, orderID)
|
||||||
|
case "Open":
|
||||||
|
return fileputget(handlers.FilePut, r, pkt, alloc, orderID)
|
||||||
|
case "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink", "Remove", "PosixRename", "StatVFS":
|
||||||
|
return filecmd(handlers.FileCmd, r, pkt)
|
||||||
|
case "List":
|
||||||
|
return filelist(handlers.FileList, r, pkt)
|
||||||
|
case "Stat", "Lstat":
|
||||||
|
return filestat(handlers.FileList, r, pkt)
|
||||||
|
case "Readlink":
|
||||||
|
if readlinkFileLister, ok := handlers.FileList.(ReadlinkFileLister); ok {
|
||||||
|
return readlink(readlinkFileLister, r, pkt)
|
||||||
|
}
|
||||||
|
return filestat(handlers.FileList, r, pkt)
|
||||||
|
default:
|
||||||
|
return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Additional initialization for Open packets
|
||||||
|
func (r *Request) open(h Handlers, pkt requestPacket) responsePacket {
|
||||||
|
flags := r.Pflags()
|
||||||
|
|
||||||
|
id := pkt.id()
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case flags.Write, flags.Append, flags.Creat, flags.Trunc:
|
||||||
|
if flags.Read {
|
||||||
|
if openFileWriter, ok := h.FilePut.(OpenFileWriter); ok {
|
||||||
|
r.Method = "Open"
|
||||||
|
rw, err := openFileWriter.OpenFile(r)
|
||||||
|
if err != nil {
|
||||||
|
return statusFromError(id, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.setWriterAtReaderAt(rw)
|
                return &sshFxpHandlePacket{
                    ID:     id,
                    Handle: r.handle,
                }
            }
        }

        r.Method = "Put"
        wr, err := h.FilePut.Filewrite(r)
        if err != nil {
            return statusFromError(id, err)
        }

        r.setWriterAt(wr)

    case flags.Read:
        r.Method = "Get"
        rd, err := h.FileGet.Fileread(r)
        if err != nil {
            return statusFromError(id, err)
        }

        r.setReaderAt(rd)

    default:
        return statusFromError(id, errors.New("bad file flags"))
    }

    return &sshFxpHandlePacket{
        ID:     id,
        Handle: r.handle,
    }
}

func (r *Request) opendir(h Handlers, pkt requestPacket) responsePacket {
    r.Method = "List"
    la, err := h.FileList.Filelist(r)
    if err != nil {
        return statusFromError(pkt.id(), wrapPathError(r.Filepath, err))
    }

    r.setListerAt(la)

    return &sshFxpHandlePacket{
        ID:     pkt.id(),
        Handle: r.handle,
    }
}

// wrap FileReader handler
func fileget(h FileReader, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
    rd := r.getReaderAt()
    if rd == nil {
        return statusFromError(pkt.id(), errors.New("unexpected read packet"))
    }

    data, offset, _ := packetData(pkt, alloc, orderID)

    n, err := rd.ReadAt(data, offset)
    // only return EOF error if no data left to read
    if err != nil && (err != io.EOF || n == 0) {
        return statusFromError(pkt.id(), err)
    }

    return &sshFxpDataPacket{
        ID:     pkt.id(),
        Length: uint32(n),
        Data:   data[:n],
    }
}

// wrap FileWriter handler
func fileput(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
    wr := r.getWriterAt()
    if wr == nil {
        return statusFromError(pkt.id(), errors.New("unexpected write packet"))
    }

    data, offset, _ := packetData(pkt, alloc, orderID)

    _, err := wr.WriteAt(data, offset)
    return statusFromError(pkt.id(), err)
}

// wrap OpenFileWriter handler
func fileputget(h FileWriter, r *Request, pkt requestPacket, alloc *allocator, orderID uint32) responsePacket {
    rw := r.getWriterAtReaderAt()
    if rw == nil {
        return statusFromError(pkt.id(), errors.New("unexpected write and read packet"))
    }

    switch p := pkt.(type) {
    case *sshFxpReadPacket:
        data, offset := p.getDataSlice(alloc, orderID), int64(p.Offset)

        n, err := rw.ReadAt(data, offset)
        // only return EOF error if no data left to read
        if err != nil && (err != io.EOF || n == 0) {
            return statusFromError(pkt.id(), err)
        }

        return &sshFxpDataPacket{
            ID:     pkt.id(),
            Length: uint32(n),
            Data:   data[:n],
        }

    case *sshFxpWritePacket:
        data, offset := p.Data, int64(p.Offset)

        _, err := rw.WriteAt(data, offset)
        return statusFromError(pkt.id(), err)

    default:
        return statusFromError(pkt.id(), errors.New("unexpected packet type for read or write"))
    }
}

// file data for additional read/write packets
func packetData(p requestPacket, alloc *allocator, orderID uint32) (data []byte, offset int64, length uint32) {
    switch p := p.(type) {
    case *sshFxpReadPacket:
        return p.getDataSlice(alloc, orderID), int64(p.Offset), p.Len
    case *sshFxpWritePacket:
        return p.Data, int64(p.Offset), p.Length
    }
    return
}
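For illustration (not part of the vendored pkg/sftp source): fileget and fileput above only require that a request-server backend hand back an io.ReaderAt for downloads and an io.WriterAt for uploads; packetData then supplies the buffer and offset for each read/write packet. A minimal sketch of such a backend follows — the diskHandler type, its root field and the package name are hypothetical, only sftp.Request, Fileread and Filewrite come from the code above.

```go
package backend

import (
    "io"
    "os"

    "github.com/pkg/sftp"
)

// diskHandler is a hypothetical backend rooted at a directory on disk.
// *os.File already implements io.ReaderAt and io.WriterAt, which is all
// fileget/fileput above need in order to satisfy read and write packets.
type diskHandler struct{ root string }

// Fileread serves download requests (Method "Get" above).
func (d diskHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) {
    return os.Open(d.root + r.Filepath)
}

// Filewrite serves upload requests (Method "Put" above); offsets come from the
// client's write packets, so the file is deliberately opened without O_APPEND.
func (d diskHandler) Filewrite(r *sftp.Request) (io.WriterAt, error) {
    return os.OpenFile(d.root+r.Filepath, os.O_CREATE|os.O_RDWR, 0o644)
}
```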
|
||||||
|
|
||||||
|
// wrap FileCmder handler
|
||||||
|
func filecmd(h FileCmder, r *Request, pkt requestPacket) responsePacket {
|
||||||
|
switch p := pkt.(type) {
|
||||||
|
case *sshFxpFsetstatPacket:
|
||||||
|
r.Flags = p.Flags
|
||||||
|
r.Attrs = p.Attrs.([]byte)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch r.Method {
|
||||||
|
case "PosixRename":
|
||||||
|
if posixRenamer, ok := h.(PosixRenameFileCmder); ok {
|
||||||
|
err := posixRenamer.PosixRename(r)
|
||||||
|
return statusFromError(pkt.id(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PosixRenameFileCmder not implemented handle this request as a Rename
|
||||||
|
r.Method = "Rename"
|
||||||
|
err := h.Filecmd(r)
|
||||||
|
return statusFromError(pkt.id(), err)
|
||||||
|
|
||||||
|
case "StatVFS":
|
||||||
|
if statVFSCmdr, ok := h.(StatVFSFileCmder); ok {
|
||||||
|
stat, err := statVFSCmdr.StatVFS(r)
|
||||||
|
if err != nil {
|
||||||
|
return statusFromError(pkt.id(), err)
|
||||||
|
}
|
||||||
|
stat.ID = pkt.id()
|
||||||
|
return stat
|
||||||
|
}
|
||||||
|
|
||||||
|
return statusFromError(pkt.id(), ErrSSHFxOpUnsupported)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := h.Filecmd(r)
|
||||||
|
return statusFromError(pkt.id(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// wrap FileLister handler
|
||||||
|
func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket {
|
||||||
|
lister := r.getListerAt()
|
||||||
|
if lister == nil {
|
||||||
|
return statusFromError(pkt.id(), errors.New("unexpected dir packet"))
|
||||||
|
}
|
||||||
|
|
||||||
|
offset := r.lsNext()
|
||||||
|
finfo := make([]os.FileInfo, MaxFilelist)
|
||||||
|
n, err := lister.ListAt(finfo, offset)
|
||||||
|
r.lsInc(int64(n))
|
||||||
|
// ignore EOF as we only return it when there are no results
|
||||||
|
finfo = finfo[:n] // avoid need for nil tests below
|
||||||
|
|
||||||
|
switch r.Method {
|
||||||
|
case "List":
|
||||||
|
if err != nil && (err != io.EOF || n == 0) {
|
||||||
|
return statusFromError(pkt.id(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
nameAttrs := make([]*sshFxpNameAttr, 0, len(finfo))
|
||||||
|
|
||||||
|
// If the type conversion fails, we get untyped `nil`,
|
||||||
|
// which is handled by not looking up any names.
|
||||||
|
idLookup, _ := h.(NameLookupFileLister)
|
||||||
|
|
||||||
|
for _, fi := range finfo {
|
||||||
|
nameAttrs = append(nameAttrs, &sshFxpNameAttr{
|
||||||
|
Name: fi.Name(),
|
||||||
|
LongName: runLs(idLookup, fi),
|
||||||
|
Attrs: []interface{}{fi},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &sshFxpNamePacket{
|
||||||
|
ID: pkt.id(),
|
||||||
|
NameAttrs: nameAttrs,
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("unexpected method: %s", r.Method)
|
||||||
|
return statusFromError(pkt.id(), err)
|
||||||
|
}
|
||||||
|
}
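As a sketch (not from the vendored file): filelist pages through a ListerAt in chunks of MaxFilelist, advancing the offset via lsNext/lsInc and treating io.EOF with zero results as the end of the listing. A hypothetical ListerAt backed by a plain slice that matches those semantics:

```go
package backend

import (
    "io"
    "os"
)

// listerat is a hypothetical ListerAt over an in-memory slice. filelist above
// calls ListAt with an increasing offset and a buffer of MaxFilelist entries,
// and stops once it sees io.EOF together with nothing copied.
type listerat []os.FileInfo

func (l listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
    if offset >= int64(len(l)) {
        return 0, io.EOF
    }
    n := copy(ls, l[offset:])
    if n < len(ls) {
        // fewer entries than the buffer holds: this was the last page
        return n, io.EOF
    }
    return n, nil
}
```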
func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket {
    var lister ListerAt
    var err error

    if r.Method == "Lstat" {
        if lstatFileLister, ok := h.(LstatFileLister); ok {
            lister, err = lstatFileLister.Lstat(r)
        } else {
            // LstatFileLister not implemented handle this request as a Stat
            r.Method = "Stat"
            lister, err = h.Filelist(r)
        }
    } else {
        lister, err = h.Filelist(r)
    }
    if err != nil {
        return statusFromError(pkt.id(), err)
    }
    finfo := make([]os.FileInfo, 1)
    n, err := lister.ListAt(finfo, 0)
    finfo = finfo[:n] // avoid need for nil tests below

    switch r.Method {
    case "Stat", "Lstat":
        if err != nil && err != io.EOF {
            return statusFromError(pkt.id(), err)
        }
        if n == 0 {
            err = &os.PathError{
                Op:   strings.ToLower(r.Method),
                Path: r.Filepath,
                Err:  syscall.ENOENT,
            }
            return statusFromError(pkt.id(), err)
        }
        return &sshFxpStatResponse{
            ID:   pkt.id(),
            info: finfo[0],
        }
    case "Readlink":
        if err != nil && err != io.EOF {
            return statusFromError(pkt.id(), err)
        }
        if n == 0 {
            err = &os.PathError{
                Op:   "readlink",
                Path: r.Filepath,
                Err:  syscall.ENOENT,
            }
            return statusFromError(pkt.id(), err)
        }
        filename := finfo[0].Name()
        return &sshFxpNamePacket{
            ID: pkt.id(),
            NameAttrs: []*sshFxpNameAttr{
                {
                    Name:     filename,
                    LongName: filename,
                    Attrs:    emptyFileStat,
                },
            },
        }
    default:
        err = fmt.Errorf("unexpected method: %s", r.Method)
        return statusFromError(pkt.id(), err)
    }
}

func readlink(readlinkFileLister ReadlinkFileLister, r *Request, pkt requestPacket) responsePacket {
    resolved, err := readlinkFileLister.Readlink(r.Filepath)
    if err != nil {
        return statusFromError(pkt.id(), err)
    }
    return &sshFxpNamePacket{
        ID: pkt.id(),
        NameAttrs: []*sshFxpNameAttr{
            {
                Name:     resolved,
                LongName: resolved,
                Attrs:    emptyFileStat,
            },
        },
    }
}

// init attributes of request object from packet data
func requestMethod(p requestPacket) (method string) {
    switch p.(type) {
    case *sshFxpReadPacket, *sshFxpWritePacket, *sshFxpOpenPacket:
        // set in open() above
    case *sshFxpOpendirPacket, *sshFxpReaddirPacket:
        // set in opendir() above
    case *sshFxpSetstatPacket, *sshFxpFsetstatPacket:
        method = "Setstat"
    case *sshFxpRenamePacket:
        method = "Rename"
    case *sshFxpSymlinkPacket:
        method = "Symlink"
    case *sshFxpRemovePacket:
        method = "Remove"
    case *sshFxpStatPacket, *sshFxpFstatPacket:
        method = "Stat"
    case *sshFxpLstatPacket:
        method = "Lstat"
    case *sshFxpRmdirPacket:
        method = "Rmdir"
    case *sshFxpReadlinkPacket:
        method = "Readlink"
    case *sshFxpMkdirPacket:
        method = "Mkdir"
    case *sshFxpExtendedPacketHardlink:
        method = "Link"
    }
    return method
}
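Tying the pieces of this file together (a sketch, not vendored code): the Handlers value passed into open/opendir bundles the four backends. Below, the hypothetical diskHandler and listerat from the earlier notes are wired into a request server. The FileCmd field name and NewRequestServer do not appear in this diff, so treat them as assumed pkg/sftp API.

```go
package backend

import (
    "errors"
    "io"
    "io/ioutil"
    "os"

    "github.com/pkg/sftp"
)

// Filecmd covers the modifying methods listed in requestMethod above; only two
// are shown here as an illustration.
func (d diskHandler) Filecmd(r *sftp.Request) error {
    switch r.Method {
    case "Mkdir":
        return os.Mkdir(d.root+r.Filepath, 0o755)
    case "Remove", "Rmdir":
        return os.Remove(d.root + r.Filepath)
    default:
        return errors.New("filecmd: unsupported method " + r.Method)
    }
}

// Filelist answers "List" with a directory page and the stat-style methods
// with a single entry, matching filelist/filestat above.
func (d diskHandler) Filelist(r *sftp.Request) (sftp.ListerAt, error) {
    switch r.Method {
    case "List":
        fis, err := ioutil.ReadDir(d.root + r.Filepath)
        return listerat(fis), err
    default: // "Stat", "Lstat", "Readlink" want exactly one entry
        fi, err := os.Stat(d.root + r.Filepath)
        return listerat{fi}, err
    }
}

// serveRequests wires everything into a request server; FileCmd and
// NewRequestServer are assumptions, as noted in the text above.
func serveRequests(channel io.ReadWriteCloser, root string) error {
    h := diskHandler{root: root}
    handlers := sftp.Handlers{FileGet: h, FilePut: h, FileCmd: h, FileList: h}
    return sftp.NewRequestServer(channel, handlers).Serve()
}
```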
13
vendor/github.com/pkg/sftp/request_windows.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
package sftp

import (
    "syscall"
)

func fakeFileInfoSys() interface{} {
    return syscall.Win32FileAttributeData{}
}

func testOsSys(sys interface{}) error {
    return nil
}
634
vendor/github.com/pkg/sftp/server.go
generated
vendored
Normal file
@@ -0,0 +1,634 @@
package sftp

// sftp server counterpart

import (
    "encoding"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strconv"
    "sync"
    "syscall"
    "time"
)

const (
    // SftpServerWorkerCount defines the number of workers for the SFTP server
    SftpServerWorkerCount = 8
)

// Server is an SSH File Transfer Protocol (sftp) server.
// This is intended to provide the sftp subsystem to an ssh server daemon.
// This implementation currently supports most of sftp server protocol version 3,
// as specified at https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
type Server struct {
    *serverConn
    debugStream   io.Writer
    readOnly      bool
    pktMgr        *packetManager
    openFiles     map[string]*os.File
    openFilesLock sync.RWMutex
    handleCount   int
    workDir       string
}

func (svr *Server) nextHandle(f *os.File) string {
    svr.openFilesLock.Lock()
    defer svr.openFilesLock.Unlock()
    svr.handleCount++
    handle := strconv.Itoa(svr.handleCount)
    svr.openFiles[handle] = f
    return handle
}

func (svr *Server) closeHandle(handle string) error {
    svr.openFilesLock.Lock()
    defer svr.openFilesLock.Unlock()
    if f, ok := svr.openFiles[handle]; ok {
        delete(svr.openFiles, handle)
        return f.Close()
    }

    return EBADF
}

func (svr *Server) getHandle(handle string) (*os.File, bool) {
    svr.openFilesLock.RLock()
    defer svr.openFilesLock.RUnlock()
    f, ok := svr.openFiles[handle]
    return f, ok
}

type serverRespondablePacket interface {
    encoding.BinaryUnmarshaler
    id() uint32
    respond(svr *Server) responsePacket
}

// NewServer creates a new Server instance around the provided streams, serving
// content from the root of the filesystem. Optionally, ServerOption
// functions may be specified to further configure the Server.
//
// A subsequent call to Serve() is required to begin serving files over SFTP.
func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) {
    svrConn := &serverConn{
        conn: conn{
            Reader:      rwc,
            WriteCloser: rwc,
        },
    }
    s := &Server{
        serverConn:  svrConn,
        debugStream: ioutil.Discard,
        pktMgr:      newPktMgr(svrConn),
        openFiles:   make(map[string]*os.File),
    }

    for _, o := range options {
        if err := o(s); err != nil {
            return nil, err
        }
    }

    return s, nil
}

// A ServerOption is a function which applies configuration to a Server.
type ServerOption func(*Server) error

// WithDebug enables Server debugging output to the supplied io.Writer.
func WithDebug(w io.Writer) ServerOption {
    return func(s *Server) error {
        s.debugStream = w
        return nil
    }
}

// ReadOnly configures a Server to serve files in read-only mode.
func ReadOnly() ServerOption {
    return func(s *Server) error {
        s.readOnly = true
        return nil
    }
}

// WithAllocator enable the allocator.
// After processing a packet we keep in memory the allocated slices
// and we reuse them for new packets.
// The allocator is experimental
func WithAllocator() ServerOption {
    return func(s *Server) error {
        alloc := newAllocator()
        s.pktMgr.alloc = alloc
        s.conn.alloc = alloc
        return nil
    }
}

// WithServerWorkingDirectory sets a working directory to use as base
// for relative paths.
// If unset the default is current working directory (os.Getwd).
func WithServerWorkingDirectory(workDir string) ServerOption {
    return func(s *Server) error {
        s.workDir = cleanPath(workDir)
        return nil
    }
}
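Taken together, the options above compose when constructing this os-backed server. A usage sketch, assuming rwc is the io.ReadWriteCloser carrying the SFTP byte stream (typically an SSH "sftp" subsystem channel) and "/srv/data" is a hypothetical working directory:

```go
package sshd

import (
    "io"
    "os"

    "github.com/pkg/sftp"
)

// runServer configures and runs the plain (os-backed) SFTP server.
func runServer(rwc io.ReadWriteCloser) error {
    srv, err := sftp.NewServer(
        rwc,
        sftp.ReadOnly(),           // write packets are answered with EPERM
        sftp.WithDebug(os.Stderr), // instead of the default ioutil.Discard
        sftp.WithServerWorkingDirectory("/srv/data"), // base for relative client paths
    )
    if err != nil {
        return err
    }
    return srv.Serve() // returns nil on a clean EOF from the client
}
```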
|
||||||
|
|
||||||
|
type rxPacket struct {
|
||||||
|
pktType fxp
|
||||||
|
pktBytes []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Up to N parallel servers
|
||||||
|
func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error {
|
||||||
|
for pkt := range pktChan {
|
||||||
|
// readonly checks
|
||||||
|
readonly := true
|
||||||
|
switch pkt := pkt.requestPacket.(type) {
|
||||||
|
case notReadOnly:
|
||||||
|
readonly = false
|
||||||
|
case *sshFxpOpenPacket:
|
||||||
|
readonly = pkt.readonly()
|
||||||
|
case *sshFxpExtendedPacket:
|
||||||
|
readonly = pkt.readonly()
|
||||||
|
}
|
||||||
|
|
||||||
|
// If server is operating read-only and a write operation is requested,
|
||||||
|
// return permission denied
|
||||||
|
if !readonly && svr.readOnly {
|
||||||
|
svr.pktMgr.readyPacket(
|
||||||
|
svr.pktMgr.newOrderedResponse(statusFromError(pkt.id(), syscall.EPERM), pkt.orderID()),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := handlePacket(svr, pkt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handlePacket(s *Server, p orderedRequest) error {
|
||||||
|
var rpkt responsePacket
|
||||||
|
orderID := p.orderID()
|
||||||
|
switch p := p.requestPacket.(type) {
|
||||||
|
case *sshFxInitPacket:
|
||||||
|
rpkt = &sshFxVersionPacket{
|
||||||
|
Version: sftpProtocolVersion,
|
||||||
|
Extensions: sftpExtensions,
|
||||||
|
}
|
||||||
|
case *sshFxpStatPacket:
|
||||||
|
// stat the requested file
|
||||||
|
info, err := os.Stat(s.toLocalPath(p.Path))
|
||||||
|
rpkt = &sshFxpStatResponse{
|
||||||
|
ID: p.ID,
|
||||||
|
info: info,
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
case *sshFxpLstatPacket:
|
||||||
|
// stat the requested file
|
||||||
|
info, err := os.Lstat(s.toLocalPath(p.Path))
|
||||||
|
rpkt = &sshFxpStatResponse{
|
||||||
|
ID: p.ID,
|
||||||
|
info: info,
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
case *sshFxpFstatPacket:
|
||||||
|
f, ok := s.getHandle(p.Handle)
|
||||||
|
var err error = EBADF
|
||||||
|
var info os.FileInfo
|
||||||
|
if ok {
|
||||||
|
info, err = f.Stat()
|
||||||
|
rpkt = &sshFxpStatResponse{
|
||||||
|
ID: p.ID,
|
||||||
|
info: info,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
case *sshFxpMkdirPacket:
|
||||||
|
// TODO FIXME: ignore flags field
|
||||||
|
err := os.Mkdir(s.toLocalPath(p.Path), 0o755)
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
case *sshFxpRmdirPacket:
|
||||||
|
err := os.Remove(s.toLocalPath(p.Path))
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
case *sshFxpRemovePacket:
|
||||||
|
err := os.Remove(s.toLocalPath(p.Filename))
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
case *sshFxpRenamePacket:
|
||||||
|
err := os.Rename(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath))
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
case *sshFxpSymlinkPacket:
|
||||||
|
err := os.Symlink(s.toLocalPath(p.Targetpath), s.toLocalPath(p.Linkpath))
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
case *sshFxpClosePacket:
|
||||||
|
rpkt = statusFromError(p.ID, s.closeHandle(p.Handle))
|
||||||
|
case *sshFxpReadlinkPacket:
|
||||||
|
f, err := os.Readlink(s.toLocalPath(p.Path))
|
||||||
|
rpkt = &sshFxpNamePacket{
|
||||||
|
ID: p.ID,
|
||||||
|
NameAttrs: []*sshFxpNameAttr{
|
||||||
|
{
|
||||||
|
Name: f,
|
||||||
|
LongName: f,
|
||||||
|
Attrs: emptyFileStat,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
case *sshFxpRealpathPacket:
|
||||||
|
f, err := filepath.Abs(s.toLocalPath(p.Path))
|
||||||
|
f = cleanPath(f)
|
||||||
|
rpkt = &sshFxpNamePacket{
|
||||||
|
ID: p.ID,
|
||||||
|
NameAttrs: []*sshFxpNameAttr{
|
||||||
|
{
|
||||||
|
Name: f,
|
||||||
|
LongName: f,
|
||||||
|
Attrs: emptyFileStat,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
case *sshFxpOpendirPacket:
|
||||||
|
lp := s.toLocalPath(p.Path)
|
||||||
|
|
||||||
|
if stat, err := os.Stat(lp); err != nil {
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
} else if !stat.IsDir() {
|
||||||
|
rpkt = statusFromError(p.ID, &os.PathError{
|
||||||
|
Path: lp, Err: syscall.ENOTDIR,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
rpkt = (&sshFxpOpenPacket{
|
||||||
|
ID: p.ID,
|
||||||
|
Path: p.Path,
|
||||||
|
Pflags: sshFxfRead,
|
||||||
|
}).respond(s)
|
||||||
|
}
|
||||||
|
case *sshFxpReadPacket:
|
||||||
|
var err error = EBADF
|
||||||
|
f, ok := s.getHandle(p.Handle)
|
||||||
|
if ok {
|
||||||
|
err = nil
|
||||||
|
data := p.getDataSlice(s.pktMgr.alloc, orderID)
|
||||||
|
n, _err := f.ReadAt(data, int64(p.Offset))
|
||||||
|
if _err != nil && (_err != io.EOF || n == 0) {
|
||||||
|
err = _err
|
||||||
|
}
|
||||||
|
rpkt = &sshFxpDataPacket{
|
||||||
|
ID: p.ID,
|
||||||
|
Length: uint32(n),
|
||||||
|
Data: data[:n],
|
||||||
|
// do not use data[:n:n] here to clamp the capacity, we allocated extra capacity above to avoid reallocations
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
case *sshFxpWritePacket:
|
||||||
|
f, ok := s.getHandle(p.Handle)
|
||||||
|
var err error = EBADF
|
||||||
|
if ok {
|
||||||
|
_, err = f.WriteAt(p.Data, int64(p.Offset))
|
||||||
|
}
|
||||||
|
rpkt = statusFromError(p.ID, err)
|
||||||
|
case *sshFxpExtendedPacket:
|
||||||
|
if p.SpecificPacket == nil {
|
||||||
|
rpkt = statusFromError(p.ID, ErrSSHFxOpUnsupported)
|
||||||
|
} else {
|
||||||
|
rpkt = p.respond(s)
|
||||||
|
}
|
||||||
|
case serverRespondablePacket:
|
||||||
|
rpkt = p.respond(s)
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unexpected packet type %T", p)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, orderID))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve serves SFTP connections until the streams stop or the SFTP subsystem
|
||||||
|
// is stopped. It returns nil if the server exits cleanly.
|
||||||
|
func (svr *Server) Serve() error {
|
||||||
|
defer func() {
|
||||||
|
if svr.pktMgr.alloc != nil {
|
||||||
|
svr.pktMgr.alloc.Free()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
runWorker := func(ch chan orderedRequest) {
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
if err := svr.sftpServerWorker(ch); err != nil {
|
||||||
|
svr.conn.Close() // shuts down recvPacket
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
pktChan := svr.pktMgr.workerChan(runWorker)
|
||||||
|
|
||||||
|
var err error
|
||||||
|
var pkt requestPacket
|
||||||
|
var pktType uint8
|
||||||
|
var pktBytes []byte
|
||||||
|
for {
|
||||||
|
pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID())
|
||||||
|
if err != nil {
|
||||||
|
// Check whether the connection terminated cleanly in-between packets.
|
||||||
|
if err == io.EOF {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
// we don't care about releasing allocated pages here, the server will quit and the allocator freed
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes})
|
||||||
|
if err != nil {
|
||||||
|
switch {
|
||||||
|
case errors.Is(err, errUnknownExtendedPacket):
|
||||||
|
//if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil {
|
||||||
|
// debug("failed to send err packet: %v", err)
|
||||||
|
// svr.conn.Close() // shuts down recvPacket
|
||||||
|
// break
|
||||||
|
//}
|
||||||
|
default:
|
||||||
|
debug("makePacket err: %v", err)
|
||||||
|
svr.conn.Close() // shuts down recvPacket
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pktChan <- svr.pktMgr.newOrderedRequest(pkt)
|
||||||
|
}
|
||||||
|
|
||||||
|
close(pktChan) // shuts down sftpServerWorkers
|
||||||
|
wg.Wait() // wait for all workers to exit
|
||||||
|
|
||||||
|
// close any still-open files
|
||||||
|
for handle, file := range svr.openFiles {
|
||||||
|
fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name())
|
||||||
|
file.Close()
|
||||||
|
}
|
||||||
|
return err // error from recvPacket
|
||||||
|
}
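Serve is normally run once per accepted SSH channel. None of that SSH plumbing appears in this file, so the following is only a sketch of the conventional golang.org/x/crypto/ssh subsystem handling around it; the channel-type and payload checks are assumptions based on that package's API, not something this diff defines.

```go
package sshd

import (
    "github.com/pkg/sftp"
    "golang.org/x/crypto/ssh"
)

// handleChannel serves one incoming channel of an already-authenticated SSH
// connection as an SFTP subsystem.
func handleChannel(newChan ssh.NewChannel) {
    if newChan.ChannelType() != "session" {
        newChan.Reject(ssh.UnknownChannelType, "only session channels are served")
        return
    }
    channel, requests, err := newChan.Accept()
    if err != nil {
        return
    }

    // Acknowledge only the "sftp" subsystem request; refuse everything else.
    go func() {
        for req := range requests {
            ok := req.Type == "subsystem" && len(req.Payload) > 4 && string(req.Payload[4:]) == "sftp"
            req.Reply(ok, nil)
        }
    }()

    srv, err := sftp.NewServer(channel) // an ssh.Channel is an io.ReadWriteCloser
    if err != nil {
        channel.Close()
        return
    }
    defer channel.Close()
    srv.Serve() // blocks until the client disconnects; a clean EOF returns nil
}
```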
|
||||||
|
|
||||||
|
type ider interface {
|
||||||
|
id() uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// The init packet has no ID, so we just return a zero-value ID
|
||||||
|
func (p *sshFxInitPacket) id() uint32 { return 0 }
|
||||||
|
|
||||||
|
type sshFxpStatResponse struct {
|
||||||
|
ID uint32
|
||||||
|
info os.FileInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *sshFxpStatResponse) marshalPacket() ([]byte, []byte, error) {
|
||||||
|
l := 4 + 1 + 4 // uint32(length) + byte(type) + uint32(id)
|
||||||
|
|
||||||
|
b := make([]byte, 4, l)
|
||||||
|
b = append(b, sshFxpAttrs)
|
||||||
|
b = marshalUint32(b, p.ID)
|
||||||
|
|
||||||
|
var payload []byte
|
||||||
|
payload = marshalFileInfo(payload, p.info)
|
||||||
|
|
||||||
|
return b, payload, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *sshFxpStatResponse) MarshalBinary() ([]byte, error) {
|
||||||
|
header, payload, err := p.marshalPacket()
|
||||||
|
return append(header, payload...), err
|
||||||
|
}
|
||||||
|
|
||||||
|
var emptyFileStat = []interface{}{uint32(0)}
|
||||||
|
|
||||||
|
func (p *sshFxpOpenPacket) readonly() bool {
|
||||||
|
return !p.hasPflags(sshFxfWrite)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *sshFxpOpenPacket) hasPflags(flags ...uint32) bool {
|
||||||
|
for _, f := range flags {
|
||||||
|
if p.Pflags&f == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket {
|
||||||
|
var osFlags int
|
||||||
|
if p.hasPflags(sshFxfRead, sshFxfWrite) {
|
||||||
|
osFlags |= os.O_RDWR
|
||||||
|
} else if p.hasPflags(sshFxfWrite) {
|
||||||
|
osFlags |= os.O_WRONLY
|
||||||
|
} else if p.hasPflags(sshFxfRead) {
|
||||||
|
osFlags |= os.O_RDONLY
|
||||||
|
} else {
|
||||||
|
// how are they opening?
|
||||||
|
return statusFromError(p.ID, syscall.EINVAL)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't use O_APPEND flag as it conflicts with WriteAt.
|
||||||
|
// The sshFxfAppend flag is a no-op here as the client sends the offsets.
|
||||||
|
|
||||||
|
if p.hasPflags(sshFxfCreat) {
|
||||||
|
osFlags |= os.O_CREATE
|
||||||
|
}
|
||||||
|
if p.hasPflags(sshFxfTrunc) {
|
||||||
|
osFlags |= os.O_TRUNC
|
||||||
|
}
|
||||||
|
if p.hasPflags(sshFxfExcl) {
|
||||||
|
osFlags |= os.O_EXCL
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.OpenFile(svr.toLocalPath(p.Path), osFlags, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
handle := svr.nextHandle(f)
|
||||||
|
return &sshFxpHandlePacket{ID: p.ID, Handle: handle}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *sshFxpReaddirPacket) respond(svr *Server) responsePacket {
|
||||||
|
f, ok := svr.getHandle(p.Handle)
|
||||||
|
if !ok {
|
||||||
|
return statusFromError(p.ID, EBADF)
|
||||||
|
}
|
||||||
|
|
||||||
|
dirents, err := f.Readdir(128)
|
||||||
|
if err != nil {
|
||||||
|
return statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
idLookup := osIDLookup{}
|
||||||
|
|
||||||
|
ret := &sshFxpNamePacket{ID: p.ID}
|
||||||
|
for _, dirent := range dirents {
|
||||||
|
ret.NameAttrs = append(ret.NameAttrs, &sshFxpNameAttr{
|
||||||
|
Name: dirent.Name(),
|
||||||
|
LongName: runLs(idLookup, dirent),
|
||||||
|
Attrs: []interface{}{dirent},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket {
|
||||||
|
// additional unmarshalling is required for each possibility here
|
||||||
|
b := p.Attrs.([]byte)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
p.Path = svr.toLocalPath(p.Path)
|
||||||
|
|
||||||
|
debug("setstat name \"%s\"", p.Path)
|
||||||
|
if (p.Flags & sshFileXferAttrSize) != 0 {
|
||||||
|
var size uint64
|
||||||
|
if size, b, err = unmarshalUint64Safe(b); err == nil {
|
||||||
|
err = os.Truncate(p.Path, int64(size))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (p.Flags & sshFileXferAttrPermissions) != 0 {
|
||||||
|
var mode uint32
|
||||||
|
if mode, b, err = unmarshalUint32Safe(b); err == nil {
|
||||||
|
err = os.Chmod(p.Path, os.FileMode(mode))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (p.Flags & sshFileXferAttrACmodTime) != 0 {
|
||||||
|
var atime uint32
|
||||||
|
var mtime uint32
|
||||||
|
if atime, b, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else if mtime, b, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else {
|
||||||
|
atimeT := time.Unix(int64(atime), 0)
|
||||||
|
mtimeT := time.Unix(int64(mtime), 0)
|
||||||
|
err = os.Chtimes(p.Path, atimeT, mtimeT)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (p.Flags & sshFileXferAttrUIDGID) != 0 {
|
||||||
|
var uid uint32
|
||||||
|
var gid uint32
|
||||||
|
if uid, b, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else if gid, _, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else {
|
||||||
|
err = os.Chown(p.Path, int(uid), int(gid))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *sshFxpFsetstatPacket) respond(svr *Server) responsePacket {
|
||||||
|
f, ok := svr.getHandle(p.Handle)
|
||||||
|
if !ok {
|
||||||
|
return statusFromError(p.ID, EBADF)
|
||||||
|
}
|
||||||
|
|
||||||
|
// additional unmarshalling is required for each possibility here
|
||||||
|
b := p.Attrs.([]byte)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
debug("fsetstat name \"%s\"", f.Name())
|
||||||
|
if (p.Flags & sshFileXferAttrSize) != 0 {
|
||||||
|
var size uint64
|
||||||
|
if size, b, err = unmarshalUint64Safe(b); err == nil {
|
||||||
|
err = f.Truncate(int64(size))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (p.Flags & sshFileXferAttrPermissions) != 0 {
|
||||||
|
var mode uint32
|
||||||
|
if mode, b, err = unmarshalUint32Safe(b); err == nil {
|
||||||
|
err = f.Chmod(os.FileMode(mode))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (p.Flags & sshFileXferAttrACmodTime) != 0 {
|
||||||
|
var atime uint32
|
||||||
|
var mtime uint32
|
||||||
|
if atime, b, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else if mtime, b, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else {
|
||||||
|
atimeT := time.Unix(int64(atime), 0)
|
||||||
|
mtimeT := time.Unix(int64(mtime), 0)
|
||||||
|
err = os.Chtimes(f.Name(), atimeT, mtimeT)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (p.Flags & sshFileXferAttrUIDGID) != 0 {
|
||||||
|
var uid uint32
|
||||||
|
var gid uint32
|
||||||
|
if uid, b, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else if gid, _, err = unmarshalUint32Safe(b); err != nil {
|
||||||
|
} else {
|
||||||
|
err = f.Chown(int(uid), int(gid))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return statusFromError(p.ID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func statusFromError(id uint32, err error) *sshFxpStatusPacket {
|
||||||
|
ret := &sshFxpStatusPacket{
|
||||||
|
ID: id,
|
||||||
|
StatusError: StatusError{
|
||||||
|
// sshFXOk = 0
|
||||||
|
// sshFXEOF = 1
|
||||||
|
// sshFXNoSuchFile = 2 ENOENT
|
||||||
|
// sshFXPermissionDenied = 3
|
||||||
|
// sshFXFailure = 4
|
||||||
|
// sshFXBadMessage = 5
|
||||||
|
// sshFXNoConnection = 6
|
||||||
|
// sshFXConnectionLost = 7
|
||||||
|
// sshFXOPUnsupported = 8
|
||||||
|
Code: sshFxOk,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
debug("statusFromError: error is %T %#v", err, err)
|
||||||
|
ret.StatusError.Code = sshFxFailure
|
||||||
|
ret.StatusError.msg = err.Error()
|
||||||
|
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
ret.StatusError.Code = sshFxNoSuchFile
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
if code, ok := translateSyscallError(err); ok {
|
||||||
|
ret.StatusError.Code = code
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
ret.StatusError.Code = sshFxEOF
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
var e fxerr
|
||||||
|
if errors.As(err, &e) {
|
||||||
|
ret.StatusError.Code = uint32(e)
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
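statusFromError is the single point where handler errors become wire status codes: nil maps to SSH_FX_OK, io.EOF to SSH_FX_EOF, an os.IsNotExist error to SSH_FX_NO_SUCH_FILE, a typed fxerr (such as ErrSSHFxOpUnsupported above) keeps its own code, and anything else becomes SSH_FX_FAILURE with err.Error() as the message. A hypothetical FileCmder showing both ends of that mapping (the demoCmder type is illustrative only):

```go
package sshd

import (
    "os"

    "github.com/pkg/sftp"
)

// demoCmder exists only to illustrate how handler errors surface to the client.
type demoCmder struct{ root string }

func (d demoCmder) Filecmd(r *sftp.Request) error {
    switch r.Method {
    case "Remove":
        // A missing path produces a *PathError that os.IsNotExist recognises,
        // so the client sees SSH_FX_NO_SUCH_FILE rather than a bare failure.
        return os.Remove(d.root + r.Filepath)
    default:
        // A typed fxerr keeps its exact code via errors.As; this one maps to
        // SSH_FX_OP_UNSUPPORTED, the same constant used by filecmd earlier.
        return sftp.ErrSSHFxOpUnsupported
    }
}
```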
27
vendor/github.com/pkg/sftp/server_plan9.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
package sftp

import (
    "path"
    "path/filepath"
)

func (s *Server) toLocalPath(p string) string {
    if s.workDir != "" && !path.IsAbs(p) {
        p = path.Join(s.workDir, p)
    }

    lp := filepath.FromSlash(p)

    if path.IsAbs(p) {
        tmp := lp[1:]

        if filepath.IsAbs(tmp) {
            // If the FromSlash without any starting slashes is absolute,
            // then we have a filepath encoded with a prefix '/'.
            // e.g. "/#s/boot" to "#s/boot"
            return tmp
        }
    }

    return lp
}
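As a worked example of the mapping above (the values are hypothetical): with WithServerWorkingDirectory("/srv/backups") set, a relative client path such as daily/db.tar.gz is served from /srv/backups/daily/db.tar.gz, while an absolute path like /adm/users bypasses workDir entirely. On Plan 9 the extra check additionally turns a prefixed form such as /#s/boot back into the kernel path #s/boot; the equivalents of toLocalPath for the other platforms live in files not shown in this part of the diff.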
21
vendor/github.com/pkg/sftp/server_statvfs_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
package sftp

import (
    "syscall"
)

func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) {
    return &StatVFS{
        Bsize:   uint64(stat.Bsize),
        Frsize:  uint64(stat.Bsize), // fragment size is a linux thing; use block size here
        Blocks:  stat.Blocks,
        Bfree:   stat.Bfree,
        Bavail:  stat.Bavail,
        Files:   stat.Files,
        Ffree:   stat.Ffree,
        Favail:  stat.Ffree, // not sure how to calculate Favail
        Fsid:    uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness?
        Flag:    uint64(stat.Flags), // assuming POSIX?
        Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024
    }, nil
}
30
vendor/github.com/pkg/sftp/server_statvfs_impl.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
//go:build darwin || linux
// +build darwin linux

// fill in statvfs structure with OS specific values
// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance)

package sftp

import (
    "syscall"
)

func (p *sshFxpExtendedPacketStatVFS) respond(svr *Server) responsePacket {
    retPkt, err := getStatVFSForPath(p.Path)
    if err != nil {
        return statusFromError(p.ID, err)
    }
    retPkt.ID = p.ID

    return retPkt
}

func getStatVFSForPath(name string) (*StatVFS, error) {
    var stat syscall.Statfs_t
    if err := syscall.Statfs(name, &stat); err != nil {
        return nil, err
    }

    return statvfsFromStatfst(&stat)
}
Some files were not shown because too many files have changed in this diff.