Compare commits


No commits in common. "main" and "v0.0.5" have entirely different histories.
main ... v0.0.5

429 changed files with 17073 additions and 56872 deletions

View file

@ -1,36 +0,0 @@
run-name: ${{ github.actor }} is testing
on: ["push"]
jobs:
Release:
runs-on: docker
#container:
# image: goreleaser/goreleaser
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
#- run: goreleaser release --skip-publish --snapshot --rm-dist --debug
- name: Setup Go environment
uses: https://github.com/actions/setup-go@v2.1.3
with:
go-version: 1.20
- name: Run GoReleaser
uses: https://github.com/goreleaser/goreleaser-action@v4
with:
# either 'goreleaser' (default) or 'goreleaser-pro'
distribution: goreleaser
version: latest
args: release --skip-publish --snapshot --rm-dist --debug --clean
env:
GORELEASER_FORCE_TOKEN: "gitea"
- name: Copy deb to publish folder
run: mkdir publish; cp dist/*deb publish/last.deb
- name: Upload to s3
uses: https://github.com/shallwefootball/s3-upload-action@master
with:
aws_key_id: ${{ secrets.AWS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
aws_bucket: 'kekscloud-releases'
source_dir: 'publish/'
destination_dir: 'http-server-status/'
endpoint: 's3.eu-central-003.backblazeb2.com'

View file

@ -1,40 +0,0 @@
run-name: ${{ github.actor }} is testing
on:
push:
tags:
- '*'
jobs:
Release:
runs-on: docker
#container:
# image: goreleaser/goreleaser
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
#- run: goreleaser release --skip-publish --snapshot --rm-dist --debug
- name: Setup Go environment
uses: https://github.com/actions/setup-go@v2.1.3
with:
go-version: 1.20
- name: Run GoReleaser
uses: https://github.com/goreleaser/goreleaser-action@v4
with:
# either 'goreleaser' (default) or 'goreleaser-pro'
distribution: goreleaser
version: latest
args: release --clean
env:
GORELEASER_FORCE_TOKEN: "gitea"
GITEA_TOKEN: "${{ secrets.GITEA }}"
- name: Copy deb to publish folder
run: mkdir publish; cp dist/*deb publish/stable.deb; cp dist/*deb publish/
- name: Upload to s3
uses: https://github.com/shallwefootball/s3-upload-action@master
with:
aws_key_id: ${{ secrets.AWS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY}}
aws_bucket: 'kekscloud-releases'
source_dir: 'publish/'
destination_dir: 'http-server-status/'
endpoint: 's3.eu-central-003.backblazeb2.com'

View file

@ -15,8 +15,11 @@ builds:
#- darwin
goarch:
- amd64
ldflags:
- -X main.version={{ .Version }}
archives:
- replacements:
darwin: Darwin
386: i386
amd64: x86_64
checksum:
name_template: 'checksums.txt'
changelog:
@ -47,6 +50,3 @@ nfpms:
dst: /etc/systemd/system/http-server-status.service
- src: config.yml
dst: /etc/http-server-status/config.yml.sample
scripts:
postinstall: "scripts/postinstall.sh"
preinstall: "scripts/preinstall.sh"
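The `ldflags` entry added to `.goreleaser.yml` above (`-X main.version={{ .Version }}`) is what feeds the release version into the binary: it overwrites the `version` variable declared in this repository's `main.go` further down in this diff. A minimal sketch of the mechanism; the build command and version number in the comments are illustrative, not taken from the CI config:

```go
package main

import "fmt"

// version mirrors the variable declared in main.go; its default is replaced
// at build time by GoReleaser via the ldflags entry above, roughly:
//
//	go build -ldflags "-X main.version=1.0.7" .
//
// The concrete version number here is hypothetical.
var version = "development"

func main() {
	fmt.Println("http-server-status version:", version)
}
```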

View file

@ -1,32 +1,19 @@
# HTTP Server Status
Status page with your server information (HDD usage, RAM usage, load, systemd) that returns an HTTP status 500 if a limit is reached. With this service you can monitor your server with an external tool like uptimerobot.com.
Status page for your VM or HDD server to monitor with an external tool like uptimerobot.com
# Installation
## Download URLs:
* Last Dev Build: `https://kekscloud-releases.s3.eu-central-003.backblazeb2.com/http-server-status/last.deb`
* Last Release Build: `https://kekscloud-releases.s3.eu-central-003.backblazeb2.com/http-server-status/stable.deb`
* Special Version: `https://kekscloud-releases.s3.eu-central-003.backblazeb2.com/http-server-status/http-server-status_1.0.7_linux_amd64.deb` (for versions newer than 1.0.7)
To install the server monitoring, download the latest .deb file from the release page or the links above and save it as http-server-status.deb on your server. Then install the package with dpkg:
To install the server monitoring, download the latest .deb file from the release page and save it as http-server-status.deb on your server. Then install the package with dpkg:
```
dpkg -i http-server-status.deb
```
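As described above, the page is meant to be polled by an external monitor that treats any non-200 response as a failure. Below is a minimal poller sketch in Go; the `:3003` listen address and the `/data.json` path come from the sample config and `main.go` later in this diff, while the host name and polling interval are hypothetical.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Poll the status endpoint the way an external monitor would:
	// anything other than 200 means at least one check failed.
	for {
		resp, err := http.Get("http://my-server.example:3003/data.json") // hypothetical host
		if err != nil {
			fmt.Println("server unreachable:", err)
		} else {
			if resp.StatusCode != http.StatusOK {
				fmt.Println("a limit was reached, status:", resp.StatusCode)
			} else {
				fmt.Println("all checks passed")
			}
			resp.Body.Close()
		}
		time.Sleep(time.Minute) // hypothetical polling interval
	}
}
```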
# Config
Copy the sample config and edit it:
```
cp /etc/http-server-status/config.yml.sample /etc/http-server-status/config.yml
nano /etc/http-server-status/config.yml
```
# Run
After this, start the server:
```

View file

@ -1,6 +1,7 @@
package main
import (
"fmt"
"github.com/rs/zerolog/log"
"gopkg.in/yaml.v2"
"http-server-status/internal/pkg/checks"
@ -14,21 +15,15 @@ type Config struct {
HTTP struct{
Listen string `yaml:"listen"`
} `yaml:"http"`
Auth struct{
Enabled bool `yaml:"enabled"`
Username string `yaml:"username"`
Password string `yaml:"password"`
} `yaml:"auth"`
Checks struct{
HDD checks.HDDConfig `yaml:"hdd"`
Load checks.LoadConfig `yaml:"load"`
Memory checks.MemoryConfig `yaml:"memory"`
Systemd checks.SystemdConf `yaml:"systemd"`
Version checks.VersionConfig `yaml:"version"`
} `yaml:"checks"`
}
func readConfig() {
fmt.Println(os.Args)
filename := "/etc/http-server-status/config.yml"
if len(os.Args) > 1 {
filename = os.Args[1]
@ -44,6 +39,4 @@ func readConfig() {
if err != nil {
log.Fatal().Err(err).Msg("Cant parse yaml file")
}
c.Checks.Version.Version = version
}

View file

@ -1,27 +1,12 @@
http:
listen: ":3003"
auth:
enabled: false
username: test
password: test
checks:
hdd:
enabled: true
max_percent: 80
max_percent: 100
load:
enabled: true
max_load_1: 10
max_load_5: 8
max_load_15: 5
memory:
enabled: true
max: 80
max_swap: 80
systemd:
enabled: true
services:
- sshd
version:
enabled: true
max: 70

go.mod (16 changes)
View file

@ -3,18 +3,14 @@ module http-server-status
go 1.17
require (
github.com/mackerelio/go-osstat v0.2.5
github.com/rs/zerolog v1.33.0
github.com/shirou/gopsutil v3.21.11+incompatible
gopkg.in/yaml.v3 v3.0.1
gopkg.in/yaml.v3 v3.0.1
github.com/mackerelio/go-osstat v0.2.0
github.com/rs/zerolog v1.25.0
github.com/shirou/gopsutil v3.21.8+incompatible
gopkg.in/yaml.v2 v2.4.0
)
require (
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
golang.org/x/sys v0.20.0 // indirect
github.com/go-ole/go-ole v1.2.5 // indirect
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 // indirect
)

go.sum (23 changes)
View file

@ -1,35 +1,18 @@
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/mackerelio/go-osstat v0.2.0 h1:UVn9Am/OOj2Ig0LNNHLqiHeXsZWmMNcMPZ3h+z/8+h8=
github.com/mackerelio/go-osstat v0.2.0/go.mod h1:UzRL8dMCCTqG5WdRtsxbuljMpZt9PCAGXqxPst5QtaY=
github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o=
github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II=
github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU=
github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -47,11 +30,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 h1:7ZDGnxgHAMw7thfC5bEos0RDAccZKxioiWBhfIe+tvw=
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -65,4 +43,3 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -1,11 +1,11 @@
[Unit]
Description=HTTP Status Server
Description=Go Mail Admin
After=syslog.target
[Service]
Type=simple
User=http-server-status
ExecStart=/usr/bin/http-server-status
User=root
ExecStart=/usr/local/bin/http-server-status
SyslogIdentifier=http-server-status
StandardOutput=syslog
StandardError=syslog

View file

@ -1,5 +0,0 @@
URL=`python3 -c 'import urllib.request;import json;f = urllib.request.urlopen("https://git.keks.cloud/api/v1/repos/kekskurse/http-server-status/releases?limit=1");d = json.loads(f.read().decode("utf-8"));
for asset in d[0]["assets"]: x = asset["browser_download_url"] if asset["name"][-4:] == ".deb" else ""; print(x, end="");'`
wget -O hss.deb $URL
dpkg -i hss.deb
rm hss.deb
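The removed install script above resolves the newest `.deb` asset from the Gitea releases API with a Python one-liner. The same lookup as a Go sketch; only the API URL and the JSON field names (`assets`, `name`, `browser_download_url`) come from the script, the rest is illustrative and error handling is minimal:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

type release struct {
	Assets []struct {
		Name               string `json:"name"`
		BrowserDownloadURL string `json:"browser_download_url"`
	} `json:"assets"`
}

func main() {
	// Same endpoint the shell script queries for the latest release.
	resp, err := http.Get("https://git.keks.cloud/api/v1/repos/kekskurse/http-server-status/releases?limit=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var releases []release
	if err := json.NewDecoder(resp.Body).Decode(&releases); err != nil {
		panic(err)
	}

	// Pick the .deb asset, mirroring the Python one-liner above.
	for _, a := range releases[0].Assets {
		if strings.HasSuffix(a.Name, ".deb") {
			fmt.Println(a.BrowserDownloadURL)
		}
	}
}
```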

View file

@ -1,12 +1,12 @@
package checks
import (
"github.com/rs/zerolog/log"
"github.com/shirou/gopsutil/disk"
)
type HDDConfig struct {
MaxPercent int `yaml:"max_percent"`
Enabled bool `yaml:"enabled"`
}
type HDD struct {
@ -14,11 +14,9 @@ type HDD struct {
}
func (h HDD) Execute() (ok bool, data interface{}, err error) {
if h.Config.Enabled == false {
return true, nil, nil
}
success := true
parts, _ := disk.Partitions(false)
parts, _ := disk.Partitions(true)
log.Debug().Int("max_percent", h.Config.MaxPercent).Msg("Execute HDD Check")
usage := make(map[string]float64)
for _, p := range parts {
device := p.Mountpoint
@ -34,5 +32,5 @@ func (h HDD) Execute() (ok bool, data interface{}, err error) {
}
func (h HDD) Name() string {
return "DiscSpace"
return "Disc space"
}

View file

@ -8,7 +8,6 @@ type LoadConfig struct {
MaxLoad1 float64 `yaml:"max_load_1"`
MaxLoad5 float64 `yaml:"max_load_5"`
MaxLoad15 float64 `yaml:"max_load_15"`
Enabled bool `yaml:"enabled"`
}
type Load struct {
@ -16,9 +15,6 @@ type Load struct {
}
func (h Load) Execute() (ok bool, data interface{}, err error) {
if h.Config.Enabled == false {
return true, nil, nil
}
load, err := loadavg.Get()
if load.Loadavg1 > h.Config.MaxLoad1 {
return false,load, nil
@ -36,5 +32,5 @@ func (h Load) Execute() (ok bool, data interface{}, err error) {
}
func (h Load) Name() string {
return "SystemLoad"
return "System Load"
}

View file

@ -1,14 +1,11 @@
package checks
import (
"fmt"
"github.com/mackerelio/go-osstat/memory"
)
type MemoryConfig struct {
Max float64 `yaml:"max"`
MaxSwap float64 `yaml:"max_swap"`
Enabled bool `yaml:"enabled"`
}
type Memory struct {
@ -16,32 +13,15 @@ type Memory struct {
}
func (h Memory) Execute() (ok bool, data interface{}, err error) {
if h.Config.Enabled == false {
return true, nil, nil
}
memory, err := memory.Get()
fmt.Println(memory)
p := float64(100) / float64(memory.Total) * float64(memory.Used)
ps := float64(100) / float64(memory.SwapTotal) * float64(memory.SwapUsed)
res := make(map[string]interface{})
res["row"] = memory
res["ram"] = p
res["swap"] = ps
if memory.SwapTotal == 0 {
res["swap"] = 100
}
fmt.Println(ps)
if p > h.Config.Max {
return false, res, nil
}
if ps > h.Config.MaxSwap {
return false, res, nil
return false, memory, nil
}
return true, res,nil
return true, memory,nil
}
func (h Memory) Name() string {
return "MemoryUsage"
return "Memory usage"
}

View file

@ -1,63 +0,0 @@
package checks
import (
"github.com/rs/zerolog/log"
"os/exec"
"strings"
)
type SystemdConf struct {
Services []string `yaml:"services"`
Enabled bool `yaml:"enabled"`
}
type Systemd struct {
Config SystemdConf
}
func (h Systemd) Execute() (ok bool, data interface{}, err error) {
if h.Config.Enabled == false {
return true, nil, nil
}
success := true
servicelist := make(map[string]bool)
for _, service := range h.Config.Services {
res, err := h.getStatus(service)
if err != nil {
return false, nil, err
}
if res == false {
success = false
}
servicelist[service] = res
}
return success, servicelist,nil
}
func (h Systemd) Name() string {
return "SystemdStatus"
}
func (h Systemd) getStatus(name string) (bool, error) {
cmd := exec.Command("systemctl", "check", name)
out, err := cmd.CombinedOutput()
if err != nil {
log.Debug().Err(err).Msg("Error in systemd communication")
if exitErr, ok := err.(*exec.ExitError); ok {
if exitErr.ExitCode() == 3 {
return false, nil
}
}
return false, err
}
s := string(out)
s = strings.Trim(s, "\n")
if s == "active" {
return true, nil
}
return false, nil
}
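The HDD, Load, Memory, Systemd and Version types in this package all expose the same `Execute`/`Name` method pair, which `main.go` (later in this diff) collects into a `[]checks.Check`. The interface declaration itself is not part of the diff; the following is a sketch reconstructed from those method sets, assuming it lives in the same `checks` package:

```go
package checks

// Check is the contract the HDD, Load, Memory, Systemd and Version checks
// satisfy (reconstructed from their method sets; the actual declaration is
// not shown in this diff).
type Check interface {
	// Execute runs the check and reports whether it passed, together with
	// arbitrary result data for the JSON/HTML output and any execution error.
	Execute() (ok bool, data interface{}, err error)
	// Name returns the key under which the result is reported,
	// e.g. "DiscSpace", "SystemLoad", "MemoryUsage".
	Name() string
}
```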

View file

@ -1,45 +0,0 @@
package checks
import (
"encoding/json"
"fmt"
"io"
"net/http"
)
type VersionConfig struct {
Enabled bool `yaml:"enabled"`
Version string
}
type Version struct {
Config VersionConfig
}
func (h Version) Execute() (ok bool, data interface{}, err error) {
if h.Config.Enabled == false {
return true, nil, nil
}
resp, err := http.Get("https://git.keks.cloud/api/v1/repos/kekskurse/http-server-status/releases?limit=1")
defer resp.Body.Close()
var g []struct {
TagName string `json:"tag_name"`
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return false, nil, err
}
json.Unmarshal(body, &g)
if g[0].TagName == fmt.Sprintf("v%v", h.Config.Version) {
return true, map[string]string{"version": g[0].TagName}, nil
}
return false, map[string]string{"version": g[0].TagName}, nil
}
func (h Version) Name() string {
return "Version"
}

main.go (72 changes)
View file

@ -1,74 +1,27 @@
package main
import (
_ "embed"
"encoding/json"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
checks "http-server-status/internal/pkg/checks"
"net/http"
"os"
"sync"
"text/template"
)
//go:embed template/index.html
var s string
var (
version = "development"
)
var checkList []checks.Check
func init() {
readConfig()
pp := log.Output(zerolog.ConsoleWriter{Out: os.Stdout})
multi := zerolog.MultiLevelWriter(pp)
log.Logger = zerolog.New(multi).With().Timestamp().Caller().Logger()
checkList = append(checkList, checks.HDD{c.Checks.HDD}, checks.Memory{Config: c.Checks.Memory}, checks.Load{Config: c.Checks.Load}, checks.Systemd{Config: c.Checks.Systemd}, checks.Version{Config: c.Checks.Version})
log.Debug().Int("max_percent", c.Checks.HDD.MaxPercent).Msg("HDD CHECK")
checkList = append(checkList, checks.HDD{c.Checks.HDD}, checks.Memory{Config: c.Checks.Memory}, checks.Load{Config: c.Checks.Load})
}
func auth(fn http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if c.Auth.Enabled {
user, pass, _ := r.BasicAuth()
if !check(user, pass) {
w.Header().Set("WWW-Authenticate", `Basic realm="MY REALM"`)
http.Error(w, "Unauthorized.", 401)
return
}
}
fn(w, r)
}
}
func check(u, p string) bool {
if u == c.Auth.Username && p == c.Auth.Password {
return true
}
return false
}
func main() {
http.HandleFunc("/", auth(func(writer http.ResponseWriter, request *http.Request) {
res, gloableRes := checkSystem()
if gloableRes == false {
writer.WriteHeader(http.StatusInternalServerError)
}
t, err := template.New("todos").Parse(s)
if err != nil {
panic(err)
}
t.Execute(writer, map[string]interface{}{"checks":res, "version": version})
//writer.Write([]byte(s))
}))
http.HandleFunc("/data.json", auth(handler))
http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
http.Redirect(writer, request, "/data.json", http.StatusSeeOther)
})
http.HandleFunc("/data.json", handler)
err := http.ListenAndServe(c.HTTP.Listen, nil)
log.Fatal().Err(err).Msg("Shutdown")
}
@ -84,7 +37,7 @@ func handler(w http.ResponseWriter, r *http.Request) {
resJson, err := json.Marshal(httpResposne)
if err != nil {
log.Fatal().Err(err).Interface("httpResponse", ResultReturn{}).Msg("Check Error")
log.Fatal().Err(err).Msg("Check Error")
}
if gloableRes {
w.Write(resJson)
@ -94,7 +47,6 @@ func handler(w http.ResponseWriter, r *http.Request) {
}
type ResultReturn struct {
Success bool `json:"success"`
Data interface{} `json:"data"`
@ -104,26 +56,18 @@ func checkSystem() (map[string]ResultReturn, bool) {
globaleResult := true
wg := sync.WaitGroup{}
res := make(map[string]ResultReturn)
mutex := sync.Mutex{}
for _, c := range checkList {
wg.Add(1)
go func(check checks.Check) {
defer wg.Done()
s, data, err := check.Execute()
if err != nil {
log.Error().Err(err).Msg("Cant execute check")
return
}
log.Debug().Str("check", check.Name()).Bool("status", s).Interface("Data", data).Msg("Executed Check, got Results")
s, data, _ := check.Execute()
if s == false {
globaleResult = false
}
mutex.Lock()
res[check.Name()] = ResultReturn{
Success: s,
Data: data,
}
mutex.Unlock()
}(c)
}
wg.Wait()
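For reference, the `/data.json` handler above serializes a `map[string]ResultReturn` keyed by `Check.Name()`. A small consumer sketch; the field names come from the `ResultReturn` struct in the diff, while the concrete payload values are hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// ResultReturn mirrors the struct marshalled by handler() above.
type ResultReturn struct {
	Success bool        `json:"success"`
	Data    interface{} `json:"data"`
}

func main() {
	// Hypothetical /data.json payload; keys come from Check.Name(),
	// values from ResultReturn.
	payload := []byte(`{"DiscSpace":{"success":true,"data":{"/":42.5}},"SystemLoad":{"success":false,"data":null}}`)

	var results map[string]ResultReturn
	if err := json.Unmarshal(payload, &results); err != nil {
		panic(err)
	}
	for name, r := range results {
		fmt.Printf("%s ok=%v data=%v\n", name, r.Success, r.Data)
	}
}
```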

View file

@ -1,3 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

View file

@ -1,12 +1,2 @@
if [ -f "/etc/http-server-status/config.yml" ]; then
echo "Config exists"
else
cp /etc/http-server-status/config.yml.sample /etc/http-server-status/config.yml
fi
systemctl daemon-reload
STATUS="$(systemctl is-active tomcat.service)"
if [ "${STATUS}" = "active" ]; then
systemctl restart http-server-status
echo "Restart Service"
fi
systemctl restart http-server-status

View file

@ -1,4 +0,0 @@
#/bin/sh
if ! id -u http-server-status > /dev/null 2>&1; then
adduser --system http-server-status
fi

View file

@ -1,156 +0,0 @@
<html>
<head>
<!-- Required meta tags -->
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>HTTP Status Page</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-F3w7mX95PdgyTmZZMECAngseQB83DfGTowi0iMjiWaeVhAn4FJkqJByhZMI3AhiU" crossorigin="anonymous">
</head>
<body>
<div class="container">
<div class="row">
<div class="col-md-12">
<h1>HTTP Status Page</h1>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div class="card">
<div class="card-header {{if .checks.DiscSpace.Success }}bg-success{{else}}bg-danger{{end}}" id="hddtitle">
Check HDD
</div>
<div class="card-body" id="hddpercent">
<table class="table">
{{range $key, $val := .checks.DiscSpace.Data}}
<tr>
<td style="width: 20%">{{ $key }}</td>
<td>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {{ $val }}%;" aria-valuenow="{{ $val }}" aria-valuemin="0" aria-valuemax="100">{{ $val }}%</div>
</div>
</td>
</tr>
{{end }}
</table>
</div>
</div>
</div>
<div class="col-md-6">
<div class="card">
<div class="card-header {{if .checks.MemoryUsage.Success }}bg-success{{else}}bg-danger{{end}}" id="ramtitle">
Check RAM
</div>
<div class="card-body">
{{ if .checks.MemoryUsage.Data }}
<table class="table">
<tr>
<th style="width: 20%">Ram</th>
<td>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {{ .checks.MemoryUsage.Data.ram }}%;" aria-valuenow="{{ .checks.MemoryUsage.Data.ram }}" aria-valuemin="0" aria-valuemax="100">{{ .checks.MemoryUsage.Data.ram }}%</div>
</div>
</td>
</tr>
<tr>
<th style="width: 20%">Swap</th>
<td>
<div class="progress">
<div class="progress-bar" role="progressbar" style="width: {{ .checks.MemoryUsage.Data.swap }}%;" aria-valuenow="{{ .checks.MemoryUsage.Data.swap }}" aria-valuemin="0" aria-valuemax="100">{{ .checks.MemoryUsage.Data.swap }}%</div>
</div>
</td>
</tr>
</table>
{{ end }}
</div>
</div>
</div>
</div>
<div class="row" style="margin-top: 20px;">
<div class="col-md-6">
<div class="card">
<div class="card-header {{if .checks.SystemLoad.Success }}bg-success{{else}}bg-danger{{end}}" id="loadtitle">
Check Load
</div>
<div class="card-body">
{{ if .checks.SystemLoad.Data }}
<table class="table">
<tr>
<th style="width: 20%">1</th>
<td>{{ .checks.SystemLoad.Data.Loadavg1 }}</td>
</tr>
<tr>
<th style="width: 20%">5</th>
<td>{{ .checks.SystemLoad.Data.Loadavg5 }}</td>
</tr>
<tr>
<th style="width: 20%">15</th>
<td>{{ .checks.SystemLoad.Data.Loadavg15 }}</td>
</tr>
</table>
{{ end }}
</div>
</div>
</div>
<div class="col-md-6">
<div class="card">
<div class="card-header {{if .checks.SystemdStatus.Success }}bg-success{{else}}bg-danger{{end}}" id="systemdtitle">
Systemd Check
</div>
<div class="card-body">
<table class="table">
{{range $key, $val := .checks.SystemdStatus.Data}}
<tr>
<td>{{ $key }}</td>
<td>
{{ $val }}
</td>
</tr>
{{end }}
</table>
</div>
</div>
</div>
</div>
<div class="row" style="margin-top: 20px;">
<div class="col-md-6">
<div class="card">
<div class="card-header {{if .checks.Version.Success }}bg-success{{else}}bg-danger{{end}}" id="loadtitle">
Version
</div>
<div class="card-body">
{{ if .checks.Version.Data }}
Newest Version: {{ .checks.Version.Data.version }}
{{ end }}
</div>
</div>
</div>
<div class="col-md-6">
</div>
</div>
</div>
</div>
<div class="container">
<footer class="d-flex flex-wrap justify-content-between align-items-center py-3 my-4 border-top">
<div class="col-md-4 d-flex align-items-center">
<a href="/" class="mb-3 me-2 mb-md-0 text-muted text-decoration-none lh-1">
<svg class="bi" width="30" height="24"><use xlink:href="#bootstrap"/></svg>
</a>
<span class="text-muted">Version: {{ .version }}</span>
</div>
<ul class="nav col-md-4 justify-content-end list-unstyled d-flex">
<li><a href="https://git.keks.cloud/kekskurse/http-server-status">Git/Source Code</a></li>
</ul>
</footer>
</div>
</body>
</html>

vendor/github.com/StackExchange/wmi/README.md (new file, generated, vendored, 13 changes)
View file

@ -0,0 +1,13 @@
wmi
===
Package wmi provides a WQL interface to Windows WMI.
Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.
---
NOTE: This project is no longer being actively maintained. If you would like
to become its new owner, please contact tlimoncelli at stack over flow dot com.
---

View file

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package wmi

View file

@ -1,4 +1,3 @@
//go:build windows
// +build windows
/*
@ -21,6 +20,7 @@ Example code to print names of running processes:
println(i, v.Name)
}
}
*/
package wmi
@ -338,6 +338,11 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat
f := v.Field(i)
of := f
isPtr := f.Kind() == reflect.Ptr
if isPtr {
ptr := reflect.New(f.Type().Elem())
f.Set(ptr)
f = f.Elem()
}
n := v.Type().Field(i).Name
if n[0] < 'A' || n[0] > 'Z' {
continue
@ -362,12 +367,6 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat
}
defer prop.Clear()
if isPtr && !(c.PtrNil && prop.VT == 0x1) {
ptr := reflect.New(f.Type().Elem())
f.Set(ptr)
f = f.Elem()
}
if prop.VT == 0x1 { //VT_NULL
continue
}
@ -456,18 +455,6 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat
Reason: "not a Float32",
}
}
case float64:
switch f.Kind() {
case reflect.Float32, reflect.Float64:
f.SetFloat(val)
default:
return &ErrFieldMismatch{
StructType: of.Type(),
FieldName: n,
Reason: "not a Float64",
}
}
default:
if f.Kind() == reflect.Slice {
switch f.Type().Elem().Kind() {

View file

@ -185,9 +185,7 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}
uintptr(unsafe.Pointer(&excepInfo)),
0)
if hr != 0 {
excepInfo.renderStrings()
excepInfo.Clear()
err = NewErrorWithSubError(hr, excepInfo.description, excepInfo)
err = NewErrorWithSubError(hr, BstrToString(excepInfo.bstrDescription), excepInfo)
}
for i, varg := range vargs {
n := len(params) - i - 1

View file

@ -3,7 +3,6 @@ package ole
import (
"fmt"
"strings"
"unsafe"
)
// DISPPARAMS are the arguments that passed to methods or property.
@ -25,56 +24,6 @@ type EXCEPINFO struct {
pvReserved uintptr
pfnDeferredFillIn uintptr
scode uint32
// Go-specific part. Don't move upper cos it'll break structure layout for native code.
rendered bool
source string
description string
helpFile string
}
// renderStrings translates BSTR strings to Go ones so `.Error` and `.String`
// could be safely called after `.Clear`. We need this when we can't rely on
// a caller to call `.Clear`.
func (e *EXCEPINFO) renderStrings() {
e.rendered = true
if e.bstrSource == nil {
e.source = "<nil>"
} else {
e.source = BstrToString(e.bstrSource)
}
if e.bstrDescription == nil {
e.description = "<nil>"
} else {
e.description = BstrToString(e.bstrDescription)
}
if e.bstrHelpFile == nil {
e.helpFile = "<nil>"
} else {
e.helpFile = BstrToString(e.bstrHelpFile)
}
}
// Clear frees BSTR strings inside an EXCEPINFO and set it to NULL.
func (e *EXCEPINFO) Clear() {
freeBSTR := func(s *uint16) {
// SysFreeString don't return errors and is safe for call's on NULL.
// https://docs.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-sysfreestring
_ = SysFreeString((*int16)(unsafe.Pointer(s)))
}
if e.bstrSource != nil {
freeBSTR(e.bstrSource)
e.bstrSource = nil
}
if e.bstrDescription != nil {
freeBSTR(e.bstrDescription)
e.bstrDescription = nil
}
if e.bstrHelpFile != nil {
freeBSTR(e.bstrHelpFile)
e.bstrHelpFile = nil
}
}
// WCode return wCode in EXCEPINFO.
@ -89,30 +38,48 @@ func (e EXCEPINFO) SCODE() uint32 {
// String convert EXCEPINFO to string.
func (e EXCEPINFO) String() string {
if !e.rendered {
e.renderStrings()
var src, desc, hlp string
if e.bstrSource == nil {
src = "<nil>"
} else {
src = BstrToString(e.bstrSource)
}
if e.bstrDescription == nil {
desc = "<nil>"
} else {
desc = BstrToString(e.bstrDescription)
}
if e.bstrHelpFile == nil {
hlp = "<nil>"
} else {
hlp = BstrToString(e.bstrHelpFile)
}
return fmt.Sprintf(
"wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x",
e.wCode, e.source, e.description, e.helpFile, e.dwHelpContext, e.scode,
e.wCode, src, desc, hlp, e.dwHelpContext, e.scode,
)
}
// Error implements error interface and returns error string.
func (e EXCEPINFO) Error() string {
if !e.rendered {
e.renderStrings()
if e.bstrDescription != nil {
return strings.TrimSpace(BstrToString(e.bstrDescription))
}
if e.description != "<nil>" {
return strings.TrimSpace(e.description)
src := "Unknown"
if e.bstrSource != nil {
src = BstrToString(e.bstrSource)
}
code := e.scode
if e.wCode != 0 {
code = uint32(e.wCode)
}
return fmt.Sprintf("%v: %#x", e.source, code)
return fmt.Sprintf("%v: %#x", src, code)
}
// PARAMDATA defines parameter data type.

View file

@ -84,13 +84,13 @@ func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) {
safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
values[i] = v
case VT_BSTR:
v , _ := safeArrayGetElementString(sac.Array, i)
var v string
safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
values[i] = v
case VT_VARIANT:
var v VARIANT
safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
values[i] = v.Value()
v.Clear()
default:
// TODO
}

View file

@ -1,11 +0,0 @@
// +build arm
package ole
type VARIANT struct {
VT VT // 2
wReserved1 uint16 // 4
wReserved2 uint16 // 6
wReserved3 uint16 // 8
Val int64 // 16
}

View file

@ -1,13 +0,0 @@
//go:build arm64
// +build arm64
package ole
type VARIANT struct {
VT VT // 2
wReserved1 uint16 // 4
wReserved2 uint16 // 6
wReserved3 uint16 // 8
Val int64 // 16
_ [8]byte // 24
}

View file

@ -1,22 +0,0 @@
// +build windows,arm
package ole
import (
"errors"
"syscall"
"time"
"unsafe"
)
// GetVariantDate converts COM Variant Time value to Go time.Time.
func GetVariantDate(value uint64) (time.Time, error) {
var st syscall.Systemtime
v1 := uint32(value)
v2 := uint32(value >> 32)
r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st)))
if r != 0 {
return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
}
return time.Now(), errors.New("Could not convert to time, passing current time.")
}

View file

@ -1,23 +0,0 @@
//go:build windows && arm64
// +build windows,arm64
package ole
import (
"errors"
"syscall"
"time"
"unsafe"
)
// GetVariantDate converts COM Variant Time value to Go time.Time.
func GetVariantDate(value uint64) (time.Time, error) {
var st syscall.Systemtime
v1 := uint32(value)
v2 := uint32(value >> 32)
r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st)))
if r != 0 {
return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
}
return time.Now(), errors.New("Could not convert to time, passing current time.")
}

View file

@ -1,4 +1,3 @@
//go:build (darwin || freebsd || netbsd || openbsd) && !cgo
// +build darwin freebsd netbsd openbsd
// +build !cgo

View file

@ -1,4 +1,3 @@
//go:build !windows && cgo
// +build !windows,cgo
package loadavg

View file

@ -1,4 +1,3 @@
//go:build linux && !cgo
// +build linux,!cgo
package loadavg

View file

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package loadavg

View file

@ -1,4 +1,3 @@
//go:build darwin
// +build darwin
package memory
@ -33,8 +32,7 @@ func Get() (*Stats, error) {
}
memory, err := collectMemoryStats(out)
if err != nil {
// it is needed to cleanup the process, but its result is not needed.
go cmd.Wait() //nolint:errcheck
go cmd.Wait()
return nil, err
}
if err := cmd.Wait(); err != nil {
@ -63,16 +61,16 @@ type Stats struct {
}
// References:
// - https://support.apple.com/guide/activity-monitor/view-memory-usage-actmntr1004/10.14/mac/11.0
// - https://opensource.apple.com/source/system_cmds/system_cmds-880.60.2/vm_stat.tproj/
// - https://support.apple.com/en-us/HT201464#memory
// - https://developer.apple.com/library/content/documentation/Performance/Conceptual/ManagingMemoryStats/Articles/AboutMemoryStats.html
// - https://opensource.apple.com/source/system_cmds/system_cmds-790/vm_stat.tproj/
func collectMemoryStats(out io.Reader) (*Stats, error) {
scanner := bufio.NewScanner(out)
if !scanner.Scan() {
return nil, fmt.Errorf("failed to scan output of vm_stat")
}
line := scanner.Text()
var pageSize uint64
if _, err := fmt.Sscanf(line, "Mach Virtual Memory Statistics: (page size of %d bytes)", &pageSize); err != nil {
if !strings.HasPrefix(line, "Mach Virtual Memory Statistics:") {
return nil, fmt.Errorf("unexpected output of vm_stat: %s", line)
}
@ -97,7 +95,7 @@ func collectMemoryStats(out io.Reader) (*Stats, error) {
if ptr := memStats[line[:i]]; ptr != nil {
val := strings.TrimRight(strings.TrimSpace(line[i+1:]), ".")
if v, err := strconv.ParseUint(val, 10, 64); err == nil {
*ptr = v * pageSize
*ptr = v * 4096
}
}
}

View file

@ -1,4 +1,3 @@
//go:build freebsd
// +build freebsd
package memory

View file

@ -1,4 +1,3 @@
//go:build linux
// +build linux
package memory

View file

@ -1,4 +1,3 @@
//go:build !linux && !darwin && !windows && !freebsd
// +build !linux,!darwin,!windows,!freebsd
package memory

View file

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package memory

View file

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,48 +0,0 @@
# go-colorable
[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest)
[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable)
[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
Colorable writer for windows.
For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
This package is possible to handle escape sequence for ansi color on windows.
## Too Bad!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
## So Good!
![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
## Usage
```go
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
logrus.SetOutput(colorable.NewColorableStdout())
logrus.Info("succeeded")
logrus.Warn("not correct")
logrus.Error("something error")
logrus.Fatal("panic")
```
You can compile above code on non-windows OSs.
## Installation
```
$ go get github.com/mattn/go-colorable
```
# License
MIT
# Author
Yasuhiro Matsumoto (a.k.a mattn)

View file

@ -1,38 +0,0 @@
//go:build appengine
// +build appengine
package colorable
import (
"io"
"os"
_ "github.com/mattn/go-isatty"
)
// NewColorable returns new instance of Writer which handles escape sequence.
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
return file
}
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
return os.Stdout
}
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
return os.Stderr
}
// EnableColorsStdout enable colors if possible.
func EnableColorsStdout(enabled *bool) func() {
if enabled != nil {
*enabled = true
}
return func() {}
}

View file

@ -1,38 +0,0 @@
//go:build !windows && !appengine
// +build !windows,!appengine
package colorable
import (
"io"
"os"
_ "github.com/mattn/go-isatty"
)
// NewColorable returns new instance of Writer which handles escape sequence.
func NewColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
return file
}
// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
return os.Stdout
}
// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
return os.Stderr
}
// EnableColorsStdout enable colors if possible.
func EnableColorsStdout(enabled *bool) func() {
if enabled != nil {
*enabled = true
}
return func() {}
}

File diff suppressed because it is too large

View file

@ -1,12 +0,0 @@
#!/usr/bin/env bash
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
go test -race -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
fi
done

View file

@ -1,57 +0,0 @@
package colorable
import (
"bytes"
"io"
)
// NonColorable holds writer but removes escape sequence.
type NonColorable struct {
out io.Writer
}
// NewNonColorable returns new instance of Writer which removes escape sequence from Writer.
func NewNonColorable(w io.Writer) io.Writer {
return &NonColorable{out: w}
}
// Write writes data on console
func (w *NonColorable) Write(data []byte) (n int, err error) {
er := bytes.NewReader(data)
var plaintext bytes.Buffer
loop:
for {
c1, err := er.ReadByte()
if err != nil {
plaintext.WriteTo(w.out)
break loop
}
if c1 != 0x1b {
plaintext.WriteByte(c1)
continue
}
_, err = plaintext.WriteTo(w.out)
if err != nil {
break loop
}
c2, err := er.ReadByte()
if err != nil {
break loop
}
if c2 != 0x5b {
continue
}
for {
c, err := er.ReadByte()
if err != nil {
break loop
}
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
break
}
}
}
return len(data), nil
}

View file

@ -1,9 +0,0 @@
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
MIT License (Expat)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,50 +0,0 @@
# go-isatty
[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty)
[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty)
[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master)
[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty)
isatty for golang
## Usage
```go
package main
import (
"fmt"
"github.com/mattn/go-isatty"
"os"
)
func main() {
if isatty.IsTerminal(os.Stdout.Fd()) {
fmt.Println("Is Terminal")
} else if isatty.IsCygwinTerminal(os.Stdout.Fd()) {
fmt.Println("Is Cygwin/MSYS2 Terminal")
} else {
fmt.Println("Is Not Terminal")
}
}
```
## Installation
```
$ go get github.com/mattn/go-isatty
```
## License
MIT
## Author
Yasuhiro Matsumoto (a.k.a mattn)
## Thanks
* k-takata: base idea for IsCygwinTerminal
https://github.com/k-takata/go-iscygpty

View file

@ -1,2 +0,0 @@
// Package isatty implements interface to isatty
package isatty

View file

@ -1,12 +0,0 @@
#!/usr/bin/env bash
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
go test -race -coverprofile=profile.out -covermode=atomic "$d"
if [ -f profile.out ]; then
cat profile.out >> coverage.txt
rm profile.out
fi
done

View file

@ -1,19 +0,0 @@
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine
// +build darwin freebsd openbsd netbsd dragonfly hurd
// +build !appengine
package isatty
import "golang.org/x/sys/unix"
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
_, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
return err == nil
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

View file

@ -1,16 +0,0 @@
//go:build appengine || js || nacl || wasm
// +build appengine js nacl wasm
package isatty
// IsTerminal returns true if the file descriptor is terminal which
// is always false on js and appengine classic which is a sandboxed PaaS.
func IsTerminal(fd uintptr) bool {
return false
}
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

View file

@ -1,23 +0,0 @@
//go:build plan9
// +build plan9
package isatty
import (
"syscall"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
path, err := syscall.Fd2path(int(fd))
if err != nil {
return false
}
return path == "/dev/cons" || path == "/mnt/term/dev/cons"
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

View file

@ -1,21 +0,0 @@
//go:build solaris && !appengine
// +build solaris,!appengine
package isatty
import (
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c
func IsTerminal(fd uintptr) bool {
_, err := unix.IoctlGetTermio(int(fd), unix.TCGETA)
return err == nil
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

View file

@ -1,19 +0,0 @@
//go:build (linux || aix || zos) && !appengine
// +build linux aix zos
// +build !appengine
package isatty
import "golang.org/x/sys/unix"
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
return err == nil
}
// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
return false
}

View file

@ -1,125 +0,0 @@
//go:build windows && !appengine
// +build windows,!appengine
package isatty
import (
"errors"
"strings"
"syscall"
"unicode/utf16"
"unsafe"
)
const (
objectNameInfo uintptr = 1
fileNameInfo = 2
fileTypePipe = 3
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
ntdll = syscall.NewLazyDLL("ntdll.dll")
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx")
procGetFileType = kernel32.NewProc("GetFileType")
procNtQueryObject = ntdll.NewProc("NtQueryObject")
)
func init() {
// Check if GetFileInformationByHandleEx is available.
if procGetFileInformationByHandleEx.Find() != nil {
procGetFileInformationByHandleEx = nil
}
}
// IsTerminal return true if the file descriptor is terminal.
func IsTerminal(fd uintptr) bool {
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}
// Check pipe name is used for cygwin/msys2 pty.
// Cygwin/MSYS2 PTY has a name like:
// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
func isCygwinPipeName(name string) bool {
token := strings.Split(name, "-")
if len(token) < 5 {
return false
}
if token[0] != `\msys` &&
token[0] != `\cygwin` &&
token[0] != `\Device\NamedPipe\msys` &&
token[0] != `\Device\NamedPipe\cygwin` {
return false
}
if token[1] == "" {
return false
}
if !strings.HasPrefix(token[2], "pty") {
return false
}
if token[3] != `from` && token[3] != `to` {
return false
}
if token[4] != "master" {
return false
}
return true
}
// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler
// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion
// guys are using Windows XP, this is a workaround for those guys, it will also work on system from
// Windows vista to 10
// see https://stackoverflow.com/a/18792477 for details
func getFileNameByHandle(fd uintptr) (string, error) {
if procNtQueryObject == nil {
return "", errors.New("ntdll.dll: NtQueryObject not supported")
}
var buf [4 + syscall.MAX_PATH]uint16
var result int
r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5,
fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0)
if r != 0 {
return "", e
}
return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil
}
// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal.
func IsCygwinTerminal(fd uintptr) bool {
if procGetFileInformationByHandleEx == nil {
name, err := getFileNameByHandle(fd)
if err != nil {
return false
}
return isCygwinPipeName(name)
}
// Cygwin/msys's pty is a pipe.
ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0)
if ft != fileTypePipe || e != 0 {
return false
}
var buf [2 + syscall.MAX_PATH]uint16
r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(),
4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)),
uintptr(len(buf)*2), 0, 0)
if r == 0 || e != 0 {
return false
}
l := *(*uint32)(unsafe.Pointer(&buf))
return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2])))
}

View file

@ -1,6 +1,6 @@
# Zero Allocation JSON Logger
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://github.com/rs/zerolog/actions/workflows/test.yml/badge.svg)](https://github.com/rs/zerolog/actions/workflows/test.yml) [![Go Coverage](https://github.com/rs/zerolog/wiki/coverage.svg)](https://raw.githack.com/wiki/rs/zerolog/coverage.html)
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog)
The zerolog package provides a fast and simple logger dedicated to JSON output.
@ -24,7 +24,7 @@ Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog)
* [Sampling](#log-sampling)
* [Hooks](#hooks)
* [Contextual fields](#contextual-logging)
* [`context.Context` integration](#contextcontext-integration)
* `context.Context` integration
* [Integration with `net/http`](#integration-with-nethttp)
* [JSON and CBOR encoding formats](#binary-encoding)
* [Pretty logging for development](#pretty-logging)
@ -60,7 +60,7 @@ func main() {
// Output: {"time":1516134303,"level":"debug","message":"hello world"}
```
> Note: By default log writes to `os.Stderr`
> Note: The default log level for `log.Print` is *trace*
> Note: The default log level for `log.Print` is *debug*
### Contextual Logging
@ -399,8 +399,6 @@ log.Logger = log.With().Str("foo", "bar").Logger()
### Add file and line number to log
Equivalent of `Llongfile`:
```go
log.Logger = log.With().Caller().Logger()
log.Info().Msg("hello world")
@ -408,21 +406,10 @@ log.Info().Msg("hello world")
// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"}
```
Equivalent of `Lshortfile`:
```go
zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string {
return filepath.Base(file) + ":" + strconv.Itoa(line)
}
log.Logger = log.With().Caller().Logger()
log.Info().Msg("hello world")
// Output: {"level": "info", "message": "hello world", "caller": "some_file:21"}
```
### Thread-safe, lock-free, non-blocking writer
If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows:
If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follow:
```go
wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) {
@ -503,58 +490,6 @@ stdlog.Print("hello world")
// Output: {"foo":"bar","message":"hello world"}
```
### context.Context integration
Go contexts are commonly passed throughout Go code, and this can help you pass
your Logger into places it might otherwise be hard to inject. The `Logger`
instance may be attached to Go context (`context.Context`) using
`Logger.WithContext(ctx)` and extracted from it using `zerolog.Ctx(ctx)`.
For example:
```go
func f() {
logger := zerolog.New(os.Stdout)
ctx := context.Background()
// Attach the Logger to the context.Context
ctx = logger.WithContext(ctx)
someFunc(ctx)
}
func someFunc(ctx context.Context) {
// Get Logger from the go Context. if it's nil, then
// `zerolog.DefaultContextLogger` is returned, if
// `DefaultContextLogger` is nil, then a disabled logger is returned.
logger := zerolog.Ctx(ctx)
logger.Info().Msg("Hello")
}
```
A second form of `context.Context` integration allows you to pass the current
context.Context into the logged event, and retrieve it from hooks. This can be
useful to log trace and span IDs or other information stored in the go context,
and facilitates the unification of logging and tracing in some systems:
```go
type TracingHook struct{}
func (h TracingHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
ctx := e.GetCtx()
spanId := getSpanIdFromContext(ctx) // as per your tracing framework
e.Str("span-id", spanId)
}
func f() {
// Setup the logger
logger := zerolog.New(os.Stdout)
logger = logger.Hook(TracingHook{})
ctx := context.Background()
// Use the Ctx function to make the context available to the hook
logger.Info().Ctx(ctx).Msg("Hello")
}
```
### Integration with `net/http`
The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.
@ -629,7 +564,7 @@ func main() {
## Global Settings
Some settings can be changed and will be applied to all loggers:
Some settings can be changed and will by applied to all loggers:
* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode).
@ -638,14 +573,10 @@ Some settings can be changed and will be applied to all loggers:
* `zerolog.LevelFieldName`: Can be set to customize level field name.
* `zerolog.MessageFieldName`: Can be set to customize message field name.
* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamp.
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp.
* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`).
* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking.
* `zerolog.FloatingPointPrecision`: If set to a value other than -1, controls the number
of digits when formatting float numbers in JSON. See
[strconv.FormatFloat](https://pkg.go.dev/strconv#FormatFloat)
for more details.
## Field Types
@ -673,7 +604,7 @@ Most fields are also available in the slice format (`Strs` for `[]string`, `Errs
## Binary Encoding
In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](https://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
```bash
go build -tags binary_log .
@ -686,11 +617,11 @@ with zerolog library is [CSD](https://github.com/toravir/csd/).
* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog`
* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog`
* [zerologr](https://github.com/go-logr/zerologr): Implementation of `logr.LogSink` interface using `zerolog`
* [zerologr](https://github.com/hn8/zerologr): Implementation of `logr.LogSink` interface using `zerolog`
## Benchmarks
See [logbench](http://bench.zerolog.io/) for more comprehensive and up-to-date benchmarks.
See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks.
All operations are allocation free (those numbers *include* JSON encoding):
@ -751,8 +682,6 @@ Log a static string, without any context or `printf`-style templating:
## Caveats
### Field duplication
Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in final JSON:
```go
@ -764,19 +693,3 @@ logger.Info().
```
In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.
### Concurrency safety
Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger:
```go
func handler(w http.ResponseWriter, r *http.Request) {
// Create a child logger for concurrency safety
logger := log.Logger.With().Logger()
// Add context fields, for example User-Agent from HTTP headers
logger.UpdateContext(func(c zerolog.Context) zerolog.Context {
...
})
}
```

View file

@ -49,7 +49,7 @@ func (*Array) MarshalZerologArray(*Array) {
func (a *Array) write(dst []byte) []byte {
dst = enc.AppendArrayStart(dst)
if len(a.buf) > 0 {
dst = append(dst, a.buf...)
dst = append(append(dst, a.buf...))
}
dst = enc.AppendArrayEnd(dst)
putArray(a)
@ -57,7 +57,7 @@ func (a *Array) write(dst []byte) []byte {
}
// Object marshals an object that implement the LogObjectMarshaler
// interface and appends it to the array.
// interface and append append it to the array.
func (a *Array) Object(obj LogObjectMarshaler) *Array {
e := Dict()
obj.MarshalZerologObject(e)
@ -67,19 +67,19 @@ func (a *Array) Object(obj LogObjectMarshaler) *Array {
return a
}
// Str appends the val as a string to the array.
// Str append append the val as a string to the array.
func (a *Array) Str(val string) *Array {
a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val)
return a
}
// Bytes appends the val as a string to the array.
// Bytes append append the val as a string to the array.
func (a *Array) Bytes(val []byte) *Array {
a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val)
return a
}
// Hex appends the val as a hex string to the array.
// Hex append append the val as a hex string to the array.
func (a *Array) Hex(val []byte) *Array {
a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val)
return a
@ -115,97 +115,97 @@ func (a *Array) Err(err error) *Array {
return a
}
// Bool appends the val as a bool to the array.
// Bool append append the val as a bool to the array.
func (a *Array) Bool(b bool) *Array {
a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b)
return a
}
// Int appends i as a int to the array.
// Int append append i as a int to the array.
func (a *Array) Int(i int) *Array {
a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int8 appends i as a int8 to the array.
// Int8 append append i as a int8 to the array.
func (a *Array) Int8(i int8) *Array {
a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int16 appends i as a int16 to the array.
// Int16 append append i as a int16 to the array.
func (a *Array) Int16(i int16) *Array {
a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int32 appends i as a int32 to the array.
// Int32 append append i as a int32 to the array.
func (a *Array) Int32(i int32) *Array {
a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i)
return a
}
// Int64 appends i as a int64 to the array.
// Int64 append append i as a int64 to the array.
func (a *Array) Int64(i int64) *Array {
a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint appends i as a uint to the array.
// Uint append append i as a uint to the array.
func (a *Array) Uint(i uint) *Array {
a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint8 appends i as a uint8 to the array.
// Uint8 append append i as a uint8 to the array.
func (a *Array) Uint8(i uint8) *Array {
a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint16 appends i as a uint16 to the array.
// Uint16 append append i as a uint16 to the array.
func (a *Array) Uint16(i uint16) *Array {
a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint32 appends i as a uint32 to the array.
// Uint32 append append i as a uint32 to the array.
func (a *Array) Uint32(i uint32) *Array {
a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i)
return a
}
// Uint64 appends i as a uint64 to the array.
// Uint64 append append i as a uint64 to the array.
func (a *Array) Uint64(i uint64) *Array {
a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i)
return a
}
// Float32 appends f as a float32 to the array.
// Float32 append append f as a float32 to the array.
func (a *Array) Float32(f float32) *Array {
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
return a
}
// Float64 appends f as a float64 to the array.
// Float64 append append f as a float64 to the array.
func (a *Array) Float64(f float64) *Array {
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
return a
}
// Time appends t formatted as string using zerolog.TimeFieldFormat.
// Time append append t formated as string using zerolog.TimeFieldFormat.
func (a *Array) Time(t time.Time) *Array {
a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat)
return a
}
// Dur appends d to the array.
// Dur append append d to the array.
func (a *Array) Dur(d time.Duration) *Array {
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
return a
}
// Interface appends i marshaled using reflection.
// Interface append append i marshaled using reflection.
func (a *Array) Interface(i interface{}) *Array {
if obj, ok := i.(LogObjectMarshaler); ok {
return a.Object(obj)
@ -231,10 +231,3 @@ func (a *Array) MACAddr(ha net.HardwareAddr) *Array {
a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha)
return a
}
// Dict adds the dict Event to the array
func (a *Array) Dict(dict *Event) *Array {
dict.buf = enc.AppendEndMarker(dict.buf)
a.buf = append(enc.AppendArrayDelim(a.buf), dict.buf...)
return a
}
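The `Dict` method added to `Array` on the `main` side can be exercised roughly as follows (a sketch, assuming the newer API):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)

	// Build an array containing a string, a float and a nested dict.
	logger.Info().
		Array("items", zerolog.Arr().
			Str("first").
			Float64(1.5).
			Dict(zerolog.Dict().Str("kind", "nested"))).
		Msg("array demo")
	// e.g. {"level":"info","items":["first",1.5,{"kind":"nested"}],"message":"array demo"}
}
```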
@ -12,8 +12,6 @@ import (
"strings"
"sync"
"time"
"github.com/mattn/go-colorable"
)
const (
@ -28,8 +26,6 @@ const (
colorBold = 1
colorDarkGray = 90
unknownLevel = "???"
)
var (
@ -59,24 +55,12 @@ type ConsoleWriter struct {
// TimeFormat specifies the format for timestamp in output.
TimeFormat string
// TimeLocation tells ConsoleWriters default FormatTimestamp
// how to localize the time.
TimeLocation *time.Location
// PartsOrder defines the order of parts in output.
PartsOrder []string
// PartsExclude defines parts to not display in output.
PartsExclude []string
// FieldsOrder defines the order of contextual fields in output.
FieldsOrder []string
fieldIsOrdered map[string]int
// FieldsExclude defines contextual fields to not display in output.
FieldsExclude []string
FormatTimestamp Formatter
FormatLevel Formatter
FormatCaller Formatter
@ -85,10 +69,6 @@ type ConsoleWriter struct {
FormatFieldValue Formatter
FormatErrFieldName Formatter
FormatErrFieldValue Formatter
FormatExtra func(map[string]interface{}, *bytes.Buffer) error
FormatPrepare func(map[string]interface{}) error
}
// NewConsoleWriter creates and initializes a new ConsoleWriter.
@ -103,21 +83,11 @@ func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
opt(&w)
}
// Fix color on Windows
if w.Out == os.Stdout || w.Out == os.Stderr {
w.Out = colorable.NewColorable(w.Out.(*os.File))
}
return w
}
// Write transforms the JSON input with formatters and appends to w.Out.
func (w ConsoleWriter) Write(p []byte) (n int, err error) {
// Fix color on Windows
if w.Out == os.Stdout || w.Out == os.Stderr {
w.Out = colorable.NewColorable(w.Out.(*os.File))
}
if w.PartsOrder == nil {
w.PartsOrder = consoleDefaultPartsOrder()
}
@ -137,74 +107,33 @@ func (w ConsoleWriter) Write(p []byte) (n int, err error) {
return n, fmt.Errorf("cannot decode event: %s", err)
}
if w.FormatPrepare != nil {
err = w.FormatPrepare(evt)
if err != nil {
return n, err
}
}
for _, p := range w.PartsOrder {
w.writePart(buf, evt, p)
}
w.writeFields(evt, buf)
if w.FormatExtra != nil {
err = w.FormatExtra(evt, buf)
if err != nil {
return n, err
}
}
err = buf.WriteByte('\n')
if err != nil {
return n, err
}
_, err = buf.WriteTo(w.Out)
return len(p), err
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (w ConsoleWriter) Close() error {
if closer, ok := w.Out.(io.Closer); ok {
return closer.Close()
}
return nil
}
// writeFields appends formatted key-value pairs to buf.
func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) {
var fields = make([]string, 0, len(evt))
for field := range evt {
var isExcluded bool
for _, excluded := range w.FieldsExclude {
if field == excluded {
isExcluded = true
break
}
}
if isExcluded {
continue
}
switch field {
case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName:
continue
}
fields = append(fields, field)
}
if len(w.FieldsOrder) > 0 {
w.orderFields(fields)
} else {
sort.Strings(fields)
}
// Write space only if something has already been written to the buffer, and if there are fields.
if buf.Len() > 0 && len(fields) > 0 {
if len(fields) > 0 {
buf.WriteByte(' ')
}
@ -265,7 +194,7 @@ func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer
case json.Number:
buf.WriteString(fv(fValue))
default:
b, err := InterfaceMarshalFunc(fValue)
b, err := json.Marshal(fValue)
if err != nil {
fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err)
} else {
@ -300,13 +229,13 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
}
case TimestampFieldName:
if w.FormatTimestamp == nil {
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.TimeLocation, w.NoColor)
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor)
} else {
f = w.FormatTimestamp
}
case MessageFieldName:
if w.FormatMessage == nil {
f = consoleDefaultFormatMessage(w.NoColor, evt[LevelFieldName])
f = consoleDefaultFormatMessage
} else {
f = w.FormatMessage
}
@ -327,37 +256,11 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
var s = f(evt[p])
if len(s) > 0 {
if buf.Len() > 0 {
buf.WriteByte(' ') // Write space only if not the first part
}
buf.WriteString(s)
}
}
// orderFields takes an array of field names and an array representing field order
// and returns an array with any ordered fields at the beginning, in order,
// and the remaining fields after in their original order.
func (w ConsoleWriter) orderFields(fields []string) {
if w.fieldIsOrdered == nil {
w.fieldIsOrdered = make(map[string]int)
for i, fieldName := range w.FieldsOrder {
w.fieldIsOrdered[fieldName] = i
if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part
buf.WriteByte(' ')
}
}
sort.Slice(fields, func(i, j int) bool {
ii, iOrdered := w.fieldIsOrdered[fields[i]]
jj, jOrdered := w.fieldIsOrdered[fields[j]]
if iOrdered && jOrdered {
return ii < jj
}
if iOrdered {
return true
}
if jOrdered {
return false
}
return fields[i] < fields[j]
})
}
// needsQuote returns true when the string s should be quoted in output.
@ -370,13 +273,8 @@ func needsQuote(s string) bool {
return false
}
// colorize returns the string s wrapped in ANSI code c, unless disabled is true or c is 0.
// colorize returns the string s wrapped in ANSI code c, unless disabled is true.
func colorize(s interface{}, c int, disabled bool) string {
e := os.Getenv("NO_COLOR")
if e != "" || c == 0 {
disabled = true
}
if disabled {
return fmt.Sprintf("%s", s)
}
@ -394,74 +292,72 @@ func consoleDefaultPartsOrder() []string {
}
}
func consoleDefaultFormatTimestamp(timeFormat string, location *time.Location, noColor bool) Formatter {
func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
if timeFormat == "" {
timeFormat = consoleDefaultTimeFormat
}
if location == nil {
location = time.Local
}
return func(i interface{}) string {
t := "<nil>"
switch tt := i.(type) {
case string:
ts, err := time.ParseInLocation(TimeFieldFormat, tt, location)
ts, err := time.Parse(TimeFieldFormat, tt)
if err != nil {
t = tt
} else {
t = ts.In(location).Format(timeFormat)
t = ts.Format(timeFormat)
}
case json.Number:
i, err := tt.Int64()
if err != nil {
t = tt.String()
} else {
var sec, nsec int64
var sec, nsec int64 = i, 0
switch TimeFieldFormat {
case TimeFormatUnixNano:
sec, nsec = 0, i
case TimeFormatUnixMicro:
sec, nsec = 0, int64(time.Duration(i)*time.Microsecond)
case TimeFormatUnixMs:
sec, nsec = 0, int64(time.Duration(i)*time.Millisecond)
default:
sec, nsec = i, 0
nsec = int64(time.Duration(i) * time.Millisecond)
sec = 0
case TimeFormatUnixMicro:
nsec = int64(time.Duration(i) * time.Microsecond)
sec = 0
}
ts := time.Unix(sec, nsec)
t = ts.In(location).Format(timeFormat)
ts := time.Unix(sec, nsec).UTC()
t = ts.Format(timeFormat)
}
}
return colorize(t, colorDarkGray, noColor)
}
}
func stripLevel(ll string) string {
if len(ll) == 0 {
return unknownLevel
}
if len(ll) > 3 {
ll = ll[:3]
}
return strings.ToUpper(ll)
}
func consoleDefaultFormatLevel(noColor bool) Formatter {
return func(i interface{}) string {
var l string
if ll, ok := i.(string); ok {
level, _ := ParseLevel(ll)
fl, ok := FormattedLevels[level]
if ok {
return colorize(fl, LevelColors[level], noColor)
}
return stripLevel(ll)
switch ll {
case LevelTraceValue:
l = colorize("TRC", colorMagenta, noColor)
case LevelDebugValue:
l = colorize("DBG", colorYellow, noColor)
case LevelInfoValue:
l = colorize("INF", colorGreen, noColor)
case LevelWarnValue:
l = colorize("WRN", colorRed, noColor)
case LevelErrorValue:
l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor)
case LevelFatalValue:
l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor)
case LevelPanicValue:
l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor)
default:
l = colorize("???", colorBold, noColor)
}
} else {
if i == nil {
return unknownLevel
l = colorize("???", colorBold, noColor)
} else {
l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
}
return stripLevel(fmt.Sprintf("%s", i))
}
return l
}
}
@ -483,18 +379,11 @@ func consoleDefaultFormatCaller(noColor bool) Formatter {
}
}
func consoleDefaultFormatMessage(noColor bool, level interface{}) Formatter {
return func(i interface{}) string {
if i == nil || i == "" {
func consoleDefaultFormatMessage(i interface{}) string {
if i == nil {
return ""
}
switch level {
case LevelInfoValue, LevelWarnValue, LevelErrorValue, LevelFatalValue, LevelPanicValue:
return colorize(fmt.Sprintf("%s", i), colorBold, noColor)
default:
return fmt.Sprintf("%s", i)
}
}
}
func consoleDefaultFormatFieldName(noColor bool) Formatter {
@ -509,12 +398,12 @@ func consoleDefaultFormatFieldValue(i interface{}) string {
func consoleDefaultFormatErrFieldName(noColor bool) Formatter {
return func(i interface{}) string {
return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor)
return colorize(fmt.Sprintf("%s=", i), colorRed, noColor)
}
}
func consoleDefaultFormatErrFieldValue(noColor bool) Formatter {
return func(i interface{}) string {
return colorize(colorize(fmt.Sprintf("%s", i), colorBold, noColor), colorRed, noColor)
return colorize(fmt.Sprintf("%s", i), colorRed, noColor)
}
}
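A sketch of the `ConsoleWriter` options that exist only on the `main` side of the hunks above (`TimeLocation`, `FieldsOrder`, `FieldsExclude`); field names are taken from that side of the diff:

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	writer := zerolog.ConsoleWriter{
		Out:           os.Stdout,
		TimeFormat:    time.Kitchen,
		TimeLocation:  time.UTC,                   // localize timestamps in output
		FieldsOrder:   []string{"method", "path"}, // print these fields first
		FieldsExclude: []string{"pid"},            // hide noisy fields
	}

	logger := zerolog.New(writer).With().Timestamp().Int("pid", os.Getpid()).Logger()
	logger.Info().Str("method", "GET").Str("path", "/users").Int("resp_time", 23).Msg("Access")
}
```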
@ -1,9 +1,8 @@
package zerolog
import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"time"
@ -23,7 +22,7 @@ func (c Context) Logger() Logger {
// Only map[string]interface{} and []interface{} are accepted. []interface{} must
// alternate string keys and arbitrary values, and extraneous ones are ignored.
func (c Context) Fields(fields interface{}) Context {
c.l.context = appendFields(c.l.context, fields, c.l.stack)
c.l.context = appendFields(c.l.context, fields)
return c
}
@ -57,7 +56,7 @@ func (c Context) Array(key string, arr LogArrayMarshaler) Context {
// Object marshals an object that implement the LogObjectMarshaler interface.
func (c Context) Object(key string, obj LogObjectMarshaler) Context {
e := newEvent(LevelWriterAdapter{io.Discard}, 0)
e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
e.Object(key, obj)
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
putEvent(e)
@ -66,7 +65,7 @@ func (c Context) Object(key string, obj LogObjectMarshaler) Context {
// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface.
func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
e := newEvent(LevelWriterAdapter{io.Discard}, 0)
e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
e.EmbedObject(obj)
c.l.context = enc.AppendObjectData(c.l.context, e.buf)
putEvent(e)
@ -163,34 +162,9 @@ func (c Context) Errs(key string, errs []error) Context {
// Err adds the field "error" with serialized err to the logger context.
func (c Context) Err(err error) Context {
if c.l.stack && ErrorStackMarshaler != nil {
switch m := ErrorStackMarshaler(err).(type) {
case nil:
case LogObjectMarshaler:
c = c.Object(ErrorStackFieldName, m)
case error:
if m != nil && !isNilValue(m) {
c = c.Str(ErrorStackFieldName, m.Error())
}
case string:
c = c.Str(ErrorStackFieldName, m)
default:
c = c.Interface(ErrorStackFieldName, m)
}
}
return c.AnErr(ErrorFieldName, err)
}
// Ctx adds the context.Context to the logger context. The context.Context is
// not rendered in the error message, but is made available for hooks to use.
// A typical use case is to extract tracing information from the
// context.Context.
func (c Context) Ctx(ctx context.Context) Context {
c.l.ctx = ctx
return c
}
// Bool adds the field key with val as a bool to the logger context.
func (c Context) Bool(key string, b bool) Context {
c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b)
@ -325,25 +299,25 @@ func (c Context) Uints64(key string, i []uint64) Context {
// Float32 adds the field key with f as a float32 to the logger context.
func (c Context) Float32(key string, f float32) Context {
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
return c
}
// Floats32 adds the field key with f as a []float32 to the logger context.
func (c Context) Floats32(key string, f []float32) Context {
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
return c
}
// Float64 adds the field key with f as a float64 to the logger context.
func (c Context) Float64(key string, f float64) Context {
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
return c
}
// Floats64 adds the field key with f as a []float64 to the logger context.
func (c Context) Floats64(key string, f []float64) Context {
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
return c
}
@ -355,9 +329,8 @@ func (ts timestampHook) Run(e *Event, level Level, msg string) {
var th = timestampHook{}
// Timestamp adds the current local time to the logger context with the "time" key, formatted using zerolog.TimeFieldFormat.
// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
// To customize the key name, change zerolog.TimestampFieldName.
// To customize the time format, change zerolog.TimeFieldFormat.
//
// NOTE: It won't dedupe the "time" key if the *Context has one already.
func (c Context) Timestamp() Context {
@ -365,13 +338,13 @@ func (c Context) Timestamp() Context {
return c
}
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
// Time adds the field key with t formated as string using zerolog.TimeFieldFormat.
func (c Context) Time(key string, t time.Time) Context {
c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
}
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
// Times adds the field key with t formated as string using zerolog.TimeFieldFormat.
func (c Context) Times(key string, t []time.Time) Context {
c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
@ -379,42 +352,22 @@ func (c Context) Times(key string, t []time.Time) Context {
// Dur adds the fields key with d divided by unit and stored as a float.
func (c Context) Dur(key string, d time.Duration) Context {
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
return c
}
// Durs adds the fields key with d divided by unit and stored as a float.
func (c Context) Durs(key string, d []time.Duration) Context {
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
return c
}
// Interface adds the field key with obj marshaled using reflection.
func (c Context) Interface(key string, i interface{}) Context {
if obj, ok := i.(LogObjectMarshaler); ok {
return c.Object(key, obj)
}
c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i)
return c
}
// Type adds the field key with val's type using reflection.
func (c Context) Type(key string, val interface{}) Context {
c.l.context = enc.AppendType(enc.AppendKey(c.l.context, key), val)
return c
}
// Any is a wrapper around Context.Interface.
func (c Context) Any(key string, i interface{}) Context {
return c.Interface(key, i)
}
// Reset removes all the context fields.
func (c Context) Reset() Context {
c.l.context = enc.AppendBeginMarker(make([]byte, 0, 500))
return c
}
type callerHook struct {
callerSkipFrameCount int
}
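A usage sketch of the `Context` builder shown above; the `Any` and `Ctx` helpers exist only on the `main` side:

```go
package main

import (
	"context"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	ctx := context.Background()

	logger := zerolog.New(os.Stdout).With().
		Timestamp().
		Str("component", "billing").
		Any("attempt", 3). // wrapper around Interface
		Ctx(ctx).          // carried for hooks, not rendered in the output
		Logger()

	logger.Info().Msg("context fields attached")
}
```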
vendor/github.com/rs/zerolog/ctx.go generated vendored
@ -14,15 +14,10 @@ func init() {
type ctxKey struct{}
// WithContext returns a copy of ctx with the receiver attached. The Logger
// attached to the provided Context (if any) will not be effected. If the
// receiver's log level is Disabled it will only be attached to the returned
// Context if the provided Context has a previously attached Logger. If the
// provided Context has no attached Logger, a Disabled Logger will not be
// attached.
// WithContext returns a copy of ctx with l associated. If an instance of Logger
// is already in the context, the context is not updated.
//
// Note: to modify the existing Logger attached to a Context (instead of
// replacing it in a new Context), use UpdateContext with the following
// For instance, to add a field to an existing logger in the context, use this
// notation:
//
// ctx := r.Context()
@ -30,13 +25,17 @@ type ctxKey struct{}
// l.UpdateContext(func(c Context) Context {
// return c.Str("bar", "baz")
// })
//
func (l Logger) WithContext(ctx context.Context) context.Context {
if _, ok := ctx.Value(ctxKey{}).(*Logger); !ok && l.level == Disabled {
func (l *Logger) WithContext(ctx context.Context) context.Context {
if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
if lp == l {
// Do not store same logger.
return ctx
}
} else if l.level == Disabled {
// Do not store disabled logger.
return ctx
}
return context.WithValue(ctx, ctxKey{}, &l)
return context.WithValue(ctx, ctxKey{}, l)
}
// Ctx returns the Logger associated with the ctx. If no logger
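Putting the `WithContext` documentation above together with `Ctx`, a minimal round trip might look like this (a sketch only):

```go
package main

import (
	"context"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout).With().Str("svc", "api").Logger()

	// Attach the logger to a context...
	ctx := logger.WithContext(context.Background())

	// ...and retrieve it somewhere else.
	zerolog.Ctx(ctx).Info().Msg("pulled from context")
}
```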
@ -13,13 +13,13 @@ type encoder interface {
AppendBool(dst []byte, val bool) []byte
AppendBools(dst []byte, vals []bool) []byte
AppendBytes(dst, s []byte) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte
AppendEndMarker(dst []byte) []byte
AppendFloat32(dst []byte, val float32, precision int) []byte
AppendFloat64(dst []byte, val float64, precision int) []byte
AppendFloats32(dst []byte, vals []float32, precision int) []byte
AppendFloats64(dst []byte, vals []float64, precision int) []byte
AppendFloat32(dst []byte, val float32) []byte
AppendFloat64(dst []byte, val float64) []byte
AppendFloats32(dst []byte, vals []float32) []byte
AppendFloats64(dst []byte, vals []float64) []byte
AppendHex(dst, s []byte) []byte
AppendIPAddr(dst []byte, ip net.IP) []byte
AppendIPPrefix(dst []byte, pfx net.IPNet) []byte
@ -24,9 +24,6 @@ func init() {
func appendJSON(dst []byte, j []byte) []byte {
return cbor.AppendEmbeddedJSON(dst, j)
}
func appendCBOR(dst []byte, c []byte) []byte {
return cbor.AppendEmbeddedCBOR(dst, c)
}
// decodeIfBinaryToString - converts a binary formatted log msg to a
// JSON formatted String Log message.
@ -6,7 +6,6 @@ package zerolog
// JSON encoded byte stream.
import (
"encoding/base64"
"github.com/rs/zerolog/internal/json"
)
@ -26,17 +25,6 @@ func init() {
func appendJSON(dst []byte, j []byte) []byte {
return append(dst, j...)
}
func appendCBOR(dst []byte, cbor []byte) []byte {
dst = append(dst, []byte("\"data:application/cbor;base64,")...)
l := len(dst)
enc := base64.StdEncoding
n := enc.EncodedLen(len(cbor))
for i := 0; i < n; i++ {
dst = append(dst, '.')
}
enc.Encode(dst[l:], cbor)
return append(dst, '"')
}
func decodeIfBinaryToString(in []byte) string {
return string(in)
@ -1,7 +1,6 @@
package zerolog
import (
"context"
"fmt"
"net"
"os"
@ -28,7 +27,6 @@ type Event struct {
stack bool // enable error stack trace
ch []Hook // hooks from context
skipFrame int // The number of additional frames to skip when printing the caller.
ctx context.Context // Optional Go context for event
}
func putEvent(e *Event) {
@ -131,13 +129,6 @@ func (e *Event) Msgf(format string, v ...interface{}) {
e.msg(fmt.Sprintf(format, v...))
}
func (e *Event) MsgFunc(createMsg func() string) {
if e == nil {
return
}
e.msg(createMsg())
}
func (e *Event) msg(msg string) {
for _, hook := range e.ch {
hook.Run(e, e.level, msg)
@ -164,7 +155,7 @@ func (e *Event) Fields(fields interface{}) *Event {
if e == nil {
return e
}
e.buf = appendFields(e.buf, fields, e.stack)
e.buf = appendFields(e.buf, fields)
return e
}
@ -266,24 +257,18 @@ func (e *Event) Strs(key string, vals []string) *Event {
return e
}
// Stringer adds the field key with val.String() (or null if val is nil)
// to the *Event context.
// Stringer adds the field key with val.String() (or null if val is nil) to the *Event context.
func (e *Event) Stringer(key string, val fmt.Stringer) *Event {
if e == nil {
return e
}
e.buf = enc.AppendStringer(enc.AppendKey(e.buf, key), val)
return e
}
// Stringers adds the field key with vals where each individual val
// is used as val.String() (or null if val is empty) to the *Event
// context.
func (e *Event) Stringers(key string, vals []fmt.Stringer) *Event {
if e == nil {
if val != nil {
e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val.String())
return e
}
e.buf = enc.AppendStringers(enc.AppendKey(e.buf, key), vals)
e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), nil)
return e
}
@ -320,18 +305,6 @@ func (e *Event) RawJSON(key string, b []byte) *Event {
return e
}
// RawCBOR adds already encoded CBOR to the log line under key.
//
// No sanity check is performed on b
// Note: The full featureset of CBOR is supported as data will not be mapped to json but stored as data-url
func (e *Event) RawCBOR(key string, b []byte) *Event {
if e == nil {
return e
}
e.buf = appendCBOR(enc.AppendKey(e.buf, key), b)
return e
}
// AnErr adds the field key with serialized err to the *Event context.
// If err is nil, no field is added.
func (e *Event) AnErr(key string, err error) *Event {
@ -419,28 +392,6 @@ func (e *Event) Stack() *Event {
return e
}
// Ctx adds the Go Context to the *Event context. The context is not rendered
// in the output message, but is available to hooks and to Func() calls via the
// GetCtx() accessor. A typical use case is to extract tracing information from
// the Go Ctx.
func (e *Event) Ctx(ctx context.Context) *Event {
if e != nil {
e.ctx = ctx
}
return e
}
// GetCtx retrieves the Go context.Context which is optionally stored in the
// Event. This allows Hooks and functions passed to Func() to retrieve values
// which are stored in the context.Context. This can be useful in tracing,
// where span information is commonly propagated in the context.Context.
func (e *Event) GetCtx() context.Context {
if e == nil || e.ctx == nil {
return context.Background()
}
return e.ctx
}
// Bool adds the field key with val as a bool to the *Event context.
func (e *Event) Bool(key string, b bool) *Event {
if e == nil {
@ -644,7 +595,7 @@ func (e *Event) Float32(key string, f float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
return e
}
@ -653,7 +604,7 @@ func (e *Event) Floats32(key string, f []float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
return e
}
@ -662,7 +613,7 @@ func (e *Event) Float64(key string, f float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
return e
}
@ -671,7 +622,7 @@ func (e *Event) Floats64(key string, f []float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
return e
}
@ -688,7 +639,7 @@ func (e *Event) Timestamp() *Event {
return e
}
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
// Time adds the field key with t formated as string using zerolog.TimeFieldFormat.
func (e *Event) Time(key string, t time.Time) *Event {
if e == nil {
return e
@ -697,7 +648,7 @@ func (e *Event) Time(key string, t time.Time) *Event {
return e
}
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
// Times adds the field key with t formated as string using zerolog.TimeFieldFormat.
func (e *Event) Times(key string, t []time.Time) *Event {
if e == nil {
return e
@ -713,7 +664,7 @@ func (e *Event) Dur(key string, d time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
return e
}
@ -724,7 +675,7 @@ func (e *Event) Durs(key string, d []time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
return e
}
@ -739,15 +690,10 @@ func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
if t.After(start) {
d = t.Sub(start)
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
return e
}
// Any is a wrapper around Event.Interface.
func (e *Event) Any(key string, i interface{}) *Event {
return e.Interface(key, i)
}
// Interface adds the field key with i marshaled using reflection.
func (e *Event) Interface(key string, i interface{}) *Event {
if e == nil {
@ -760,15 +706,6 @@ func (e *Event) Interface(key string, i interface{}) *Event {
return e
}
// Type adds the field key with val's type using reflection.
func (e *Event) Type(key string, val interface{}) *Event {
if e == nil {
return e
}
e.buf = enc.AppendType(enc.AppendKey(e.buf, key), val)
return e
}
// CallerSkipFrame instructs any future Caller calls to skip the specified number of frames.
// This includes those added via hooks from the context.
func (e *Event) CallerSkipFrame(skip int) *Event {
@ -794,11 +731,11 @@ func (e *Event) caller(skip int) *Event {
if e == nil {
return e
}
pc, file, line, ok := runtime.Caller(skip + e.skipFrame)
_, file, line, ok := runtime.Caller(skip + e.skipFrame)
if !ok {
return e
}
e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(pc, file, line))
e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(file, line))
return e
}
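A sketch exercising a few `Event` helpers that appear only on the `main` side of the diff above (`Ctx`, `Type`, `Any`, `MsgFunc`):

```go
package main

import (
	"context"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)
	ctx := context.Background()

	logger.Info().
		Ctx(ctx).                 // available to hooks via GetCtx, not rendered
		Type("payload_type", 42). // logs "int" via reflection
		Any("payload", 42).       // wrapper around Interface
		MsgFunc(func() string {   // message built only if the event is written
			return "expensive message"
		})
}
```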
@ -1,7 +0,0 @@
{"time":"5:41PM","level":"info","message":"Starting listener","listen":":8080","pid":37556}
{"time":"5:41PM","level":"debug","message":"Access","database":"myapp","host":"localhost:4962","pid":37556}
{"time":"5:41PM","level":"info","message":"Access","method":"GET","path":"/users","pid":37556,"resp_time":23}
{"time":"5:41PM","level":"info","message":"Access","method":"POST","path":"/posts","pid":37556,"resp_time":532}
{"time":"5:41PM","level":"warn","message":"Slow request","method":"POST","path":"/posts","pid":37556,"resp_time":532}
{"time":"5:41PM","level":"info","message":"Access","method":"GET","path":"/users","pid":37556,"resp_time":10}
{"time":"5:41PM","level":"error","message":"Database connection lost","database":"myapp","pid":37556,"error":"connection reset by peer"}
@ -12,13 +12,13 @@ func isNilValue(i interface{}) bool {
return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0
}
func appendFields(dst []byte, fields interface{}, stack bool) []byte {
func appendFields(dst []byte, fields interface{}) []byte {
switch fields := fields.(type) {
case []interface{}:
if n := len(fields); n&0x1 == 1 { // odd number
fields = fields[:n-1]
}
dst = appendFieldList(dst, fields, stack)
dst = appendFieldList(dst, fields)
case map[string]interface{}:
keys := make([]string, 0, len(fields))
for key := range fields {
@ -28,13 +28,13 @@ func appendFields(dst []byte, fields interface{}, stack bool) []byte {
kv := make([]interface{}, 2)
for _, key := range keys {
kv[0], kv[1] = key, fields[key]
dst = appendFieldList(dst, kv, stack)
dst = appendFieldList(dst, kv)
}
}
return dst
}
func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
func appendFieldList(dst []byte, kvList []interface{}) []byte {
for i, n := 0, len(kvList); i < n; i += 2 {
key, val := kvList[i], kvList[i+1]
if key, ok := key.(string); ok {
@ -74,21 +74,6 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
default:
dst = enc.AppendInterface(dst, m)
}
if stack && ErrorStackMarshaler != nil {
dst = enc.AppendKey(dst, ErrorStackFieldName)
switch m := ErrorStackMarshaler(val).(type) {
case nil:
case error:
if m != nil && !isNilValue(m) {
dst = enc.AppendString(dst, m.Error())
}
case string:
dst = enc.AppendString(dst, m)
default:
dst = enc.AppendInterface(dst, m)
}
}
case []error:
dst = enc.AppendArrayStart(dst)
for i, err := range val {
@ -139,13 +124,13 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
case uint64:
dst = enc.AppendUint64(dst, val)
case float32:
dst = enc.AppendFloat32(dst, val, FloatingPointPrecision)
dst = enc.AppendFloat32(dst, val)
case float64:
dst = enc.AppendFloat64(dst, val, FloatingPointPrecision)
dst = enc.AppendFloat64(dst, val)
case time.Time:
dst = enc.AppendTime(dst, val, TimeFieldFormat)
case time.Duration:
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
case *string:
if val != nil {
dst = enc.AppendString(dst, *val)
@ -220,13 +205,13 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
}
case *float32:
if val != nil {
dst = enc.AppendFloat32(dst, *val, FloatingPointPrecision)
dst = enc.AppendFloat32(dst, *val)
} else {
dst = enc.AppendNil(dst)
}
case *float64:
if val != nil {
dst = enc.AppendFloat64(dst, *val, FloatingPointPrecision)
dst = enc.AppendFloat64(dst, *val)
} else {
dst = enc.AppendNil(dst)
}
@ -238,7 +223,7 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
}
case *time.Duration:
if val != nil {
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger)
} else {
dst = enc.AppendNil(dst)
}
@ -267,13 +252,13 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
case []uint64:
dst = enc.AppendUints64(dst, val)
case []float32:
dst = enc.AppendFloats32(dst, val, FloatingPointPrecision)
dst = enc.AppendFloats32(dst, val)
case []float64:
dst = enc.AppendFloats64(dst, val, FloatingPointPrecision)
dst = enc.AppendFloats64(dst, val)
case []time.Time:
dst = enc.AppendTimes(dst, val, TimeFieldFormat)
case []time.Duration:
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
case nil:
dst = enc.AppendNil(dst)
case net.IP:
@ -1,7 +1,6 @@
package zerolog
import (
"bytes"
"encoding/json"
"strconv"
"sync/atomic"
@ -20,10 +19,6 @@ const (
// TimeFormatUnixMicro defines a time format that makes time fields to be
// serialized as Unix timestamp integers in microseconds.
TimeFormatUnixMicro = "UNIXMICRO"
// TimeFormatUnixNano defines a time format that makes time fields to be
// serialized as Unix timestamp integers in nanoseconds.
TimeFormatUnixNano = "UNIXNANO"
)
var (
@ -66,7 +61,7 @@ var (
CallerSkipFrameCount = 2
// CallerMarshalFunc allows customization of global caller marshaling
CallerMarshalFunc = func(pc uintptr, file string, line int) string {
CallerMarshalFunc = func(file string, line int) string {
return file + ":" + strconv.Itoa(line)
}
@ -82,25 +77,11 @@ var (
}
// InterfaceMarshalFunc allows customization of interface marshaling.
// Default: "encoding/json.Marshal" with disabled HTML escaping
InterfaceMarshalFunc = func(v interface{}) ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(false)
err := encoder.Encode(v)
if err != nil {
return nil, err
}
b := buf.Bytes()
if len(b) > 0 {
// Remove trailing \n which is added by Encode.
return b[:len(b)-1], nil
}
return b, nil
}
// Default: "encoding/json.Marshal"
InterfaceMarshalFunc = json.Marshal
// TimeFieldFormat defines the time format of the Time field type. If set to
// TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX
// TimeFormatUnix, TimeFormatUnixMs or TimeFormatUnixMicro, the time is formatted as an UNIX
// timestamp as integer.
TimeFieldFormat = time.RFC3339
@ -123,39 +104,6 @@ var (
// DefaultContextLogger is returned from Ctx() if there is no logger associated
// with the context.
DefaultContextLogger *Logger
// LevelColors are used by ConsoleWriter's consoleDefaultFormatLevel to color
// log levels.
LevelColors = map[Level]int{
TraceLevel: colorBlue,
DebugLevel: 0,
InfoLevel: colorGreen,
WarnLevel: colorYellow,
ErrorLevel: colorRed,
FatalLevel: colorRed,
PanicLevel: colorRed,
}
// FormattedLevels are used by ConsoleWriter's consoleDefaultFormatLevel
// for a short level name.
FormattedLevels = map[Level]string{
TraceLevel: "TRC",
DebugLevel: "DBG",
InfoLevel: "INF",
WarnLevel: "WRN",
ErrorLevel: "ERR",
FatalLevel: "FTL",
PanicLevel: "PNC",
}
// TriggerLevelWriterBufferReuseLimit is a limit in bytes that a buffer is dropped
// from the TriggerLevelWriter buffer pool if the buffer grows above the limit.
TriggerLevelWriterBufferReuseLimit = 64 * 1024
// FloatingPointPrecision, if set to a value other than -1, controls the number
// of digits when formatting float numbers in JSON. See strconv.FormatFloat for
// more details.
FloatingPointPrecision = -1
)
var (
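A sketch of customizing two of the globals discussed in the hunks above, assuming the `main`-side signatures (`CallerMarshalFunc` taking a program counter, and the `FormattedLevels` map):

```go
package main

import (
	"os"
	"path/filepath"
	"strconv"

	"github.com/rs/zerolog"
)

func main() {
	// Newer signature: the program counter is passed in as well.
	zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string {
		return filepath.Base(file) + ":" + strconv.Itoa(line)
	}

	// Short level labels used by ConsoleWriter come from this map.
	zerolog.FormattedLevels[zerolog.InfoLevel] = "INFO"

	logger := zerolog.New(zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) {
		w.Out = os.Stderr
	})).With().Caller().Logger()

	logger.Info().Msg("custom caller and level label")
}
```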
@ -27,7 +27,6 @@ const (
// Tag Sub-types.
additionalTypeTimestamp byte = 01
additionalTypeEmbeddedCBOR byte = 63
// Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml
additionalTypeTagNetworkAddr uint16 = 260
@ -68,7 +67,7 @@ const (
var IntegerTimeFieldFormat = time.RFC3339
// NanoTimeFieldFormat indicates the format of timestamp decoded
// from a float value (time in seconds and nanoseconds).
// from a float value (time in seconds and nano seconds).
var NanoTimeFieldFormat = time.RFC3339Nano
func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte {
@ -92,8 +91,7 @@ func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte {
minor = additionalTypeIntUint64
}
dst = append(dst, major|minor)
dst = append(dst, byte(major|minor))
byteCount--
for ; byteCount >= 0; byteCount-- {
dst = append(dst, byte(number>>(uint(byteCount)*8)))
@ -5,7 +5,6 @@ package cbor
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"math"
@ -44,7 +43,7 @@ func readByte(src *bufio.Reader) byte {
return b
}
func decodeIntAdditionalType(src *bufio.Reader, minor byte) int64 {
func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 {
val := int64(0)
if minor <= 23 {
val = int64(minor)
@ -78,7 +77,7 @@ func decodeInteger(src *bufio.Reader) int64 {
if major != majorTypeUnsignedInt && major != majorTypeNegativeInt {
panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major))
}
val := decodeIntAdditionalType(src, minor)
val := decodeIntAdditonalType(src, minor)
if major == 0 {
return val
}
@ -95,7 +94,7 @@ func decodeFloat(src *bufio.Reader) (float64, int) {
switch minor {
case additionalTypeFloat16:
panic(fmt.Errorf("float16 is not supported in decodeFloat"))
panic(fmt.Errorf("float16 is not suppported in decodeFloat"))
case additionalTypeFloat32:
pb := readNBytes(src, 4)
@ -205,7 +204,7 @@ func decodeString(src *bufio.Reader, noQuotes bool) []byte {
if !noQuotes {
result = append(result, '"')
}
length := decodeIntAdditionalType(src, minor)
length := decodeIntAdditonalType(src, minor)
len := int(length)
pbs := readNBytes(src, len)
result = append(result, pbs...)
@ -214,31 +213,6 @@ func decodeString(src *bufio.Reader, noQuotes bool) []byte {
}
return append(result, '"')
}
func decodeStringToDataUrl(src *bufio.Reader, mimeType string) []byte {
pb := readByte(src)
major := pb & maskOutAdditionalType
minor := pb & maskOutMajorType
if major != majorTypeByteString {
panic(fmt.Errorf("Major type is: %d in decodeString", major))
}
length := decodeIntAdditionalType(src, minor)
l := int(length)
enc := base64.StdEncoding
lEnc := enc.EncodedLen(l)
result := make([]byte, len("\"data:;base64,\"")+len(mimeType)+lEnc)
dest := result
u := copy(dest, "\"data:")
dest = dest[u:]
u = copy(dest, mimeType)
dest = dest[u:]
u = copy(dest, ";base64,")
dest = dest[u:]
pbs := readNBytes(src, l)
enc.Encode(dest, pbs)
dest = dest[lEnc:]
dest[0] = '"'
return result
}
func decodeUTF8String(src *bufio.Reader) []byte {
pb := readByte(src)
@ -248,7 +222,7 @@ func decodeUTF8String(src *bufio.Reader) []byte {
panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major))
}
result := []byte{'"'}
length := decodeIntAdditionalType(src, minor)
length := decodeIntAdditonalType(src, minor)
len := int(length)
pbs := readNBytes(src, len)
@ -264,7 +238,7 @@ func decodeUTF8String(src *bufio.Reader) []byte {
return append(dst, '"')
}
}
// The string has no need for encoding and therefore is directly
// The string has no need for encoding an therefore is directly
// appended to the byte slice.
result = append(result, pbs...)
return append(result, '"')
@ -283,7 +257,7 @@ func array2Json(src *bufio.Reader, dst io.Writer) {
if minor == additionalTypeInfiniteCount {
unSpecifiedCount = true
} else {
length := decodeIntAdditionalType(src, minor)
length := decodeIntAdditonalType(src, minor)
len = int(length)
}
for i := 0; unSpecifiedCount || i < len; i++ {
@ -292,7 +266,7 @@ func array2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
readByte(src)
break
}
@ -303,7 +277,7 @@ func array2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
readByte(src)
break
}
@ -327,7 +301,7 @@ func map2Json(src *bufio.Reader, dst io.Writer) {
if minor == additionalTypeInfiniteCount {
unSpecifiedCount = true
} else {
length := decodeIntAdditionalType(src, minor)
length := decodeIntAdditonalType(src, minor)
len = int(length)
}
dst.Write([]byte{'{'})
@ -337,7 +311,7 @@ func map2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
readByte(src)
break
}
@ -352,7 +326,7 @@ func map2Json(src *bufio.Reader, dst io.Writer) {
if e != nil {
panic(e)
}
if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak {
if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
readByte(src)
break
}
@ -375,24 +349,10 @@ func decodeTagData(src *bufio.Reader) []byte {
switch minor {
case additionalTypeTimestamp:
return decodeTimeStamp(src)
case additionalTypeIntUint8:
val := decodeIntAdditionalType(src, minor)
switch byte(val) {
case additionalTypeEmbeddedCBOR:
pb := readByte(src)
dataMajor := pb & maskOutAdditionalType
if dataMajor != majorTypeByteString {
panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedCBOR", dataMajor))
}
src.UnreadByte()
return decodeStringToDataUrl(src, "application/cbor")
default:
panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
}
// Tag value is larger than 256 (so uint16).
case additionalTypeIntUint16:
val := decodeIntAdditionalType(src, minor)
val := decodeIntAdditonalType(src, minor)
switch uint16(val) {
case additionalTypeEmbeddedJSON:
@ -423,7 +383,7 @@ func decodeTagData(src *bufio.Reader) []byte {
case additionalTypeTagNetworkPrefix:
pb := readByte(src)
if pb != majorTypeMap|0x1 {
if pb != byte(majorTypeMap|0x1) {
panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected"))
}
octets := decodeString(src, true)
@ -1,14 +1,12 @@
package cbor
import "fmt"
// AppendStrings encodes and adds an array of strings to the dst byte array.
func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
major := majorTypeArray
l := len(vals)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -25,38 +23,13 @@ func (Encoder) AppendString(dst []byte, s string) []byte {
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l))
}
return append(dst, s...)
}
// AppendStringers encodes and adds an array of Stringer values
// to the dst byte array.
func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte {
if len(vals) == 0 {
return e.AppendArrayEnd(e.AppendArrayStart(dst))
}
dst = e.AppendArrayStart(dst)
dst = e.AppendStringer(dst, vals[0])
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = e.AppendStringer(dst, val)
}
}
return e.AppendArrayEnd(dst)
}
// AppendStringer encodes and adds the Stringer value to the dst
// byte array.
func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte {
if val == nil {
return e.AppendNil(dst)
}
return e.AppendString(dst, val.String())
}
// AppendBytes encodes and adds an array of bytes to the dst byte array.
func (Encoder) AppendBytes(dst, s []byte) []byte {
major := majorTypeByteString
@ -64,7 +37,7 @@ func (Encoder) AppendBytes(dst, s []byte) []byte {
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -77,7 +50,7 @@ func AppendEmbeddedJSON(dst, s []byte) []byte {
minor := additionalTypeEmbeddedJSON
// Append the TAG to indicate this is Embedded JSON.
dst = append(dst, major|additionalTypeIntUint16)
dst = append(dst, byte(major|additionalTypeIntUint16))
dst = append(dst, byte(minor>>8))
dst = append(dst, byte(minor&0xff))
@ -87,29 +60,7 @@ func AppendEmbeddedJSON(dst, s []byte) []byte {
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
return append(dst, s...)
}
// AppendEmbeddedCBOR adds a tag and embeds input CBOR as such.
func AppendEmbeddedCBOR(dst, s []byte) []byte {
major := majorTypeTags
minor := additionalTypeEmbeddedCBOR
// Append the TAG to indicate this is Embedded JSON.
dst = append(dst, major|additionalTypeIntUint8)
dst = append(dst, minor)
// Append the CBOR Object as Byte String.
major = majorTypeByteString
l := len(s)
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -7,7 +7,7 @@ import (
func appendIntegerTimestamp(dst []byte, t time.Time) []byte {
major := majorTypeTags
minor := additionalTypeTimestamp
dst = append(dst, major|minor)
dst = append(dst, byte(major|minor))
secs := t.Unix()
var val uint64
if secs < 0 {
@ -17,19 +17,19 @@ func appendIntegerTimestamp(dst []byte, t time.Time) []byte {
major = majorTypeUnsignedInt
val = uint64(secs)
}
dst = appendCborTypePrefix(dst, major, val)
dst = appendCborTypePrefix(dst, major, uint64(val))
return dst
}
func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte {
major := majorTypeTags
minor := additionalTypeTimestamp
dst = append(dst, major|minor)
dst = append(dst, byte(major|minor))
secs := t.Unix()
nanos := t.Nanosecond()
var val float64
val = float64(secs)*1.0 + float64(nanos)*1e-9
return e.AppendFloat64(dst, val, -1)
val = float64(secs)*1.0 + float64(nanos)*1E-9
return e.AppendFloat64(dst, val)
}
// AppendTime encodes and adds a timestamp to the dst byte array.
@ -50,7 +50,7 @@ func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -64,17 +64,17 @@ func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte
// AppendDuration encodes and adds a duration to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, unused int) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
if useInt {
return e.AppendInt64(dst, int64(d/unit))
}
return e.AppendFloat64(dst, float64(d)/float64(unit), unused)
return e.AppendFloat64(dst, float64(d)/float64(unit))
}
// AppendDurations encodes and adds an array of durations to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, unused int) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@ -82,12 +82,12 @@ func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Dur
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, d := range vals {
dst = e.AppendDuration(dst, d, unit, useInt, unused)
dst = e.AppendDuration(dst, d, unit, useInt)
}
return dst
}
@ -4,22 +4,21 @@ import (
"fmt"
"math"
"net"
"reflect"
)
// AppendNil inserts a 'Nil' object into the dst byte array.
func (Encoder) AppendNil(dst []byte) []byte {
return append(dst, majorTypeSimpleAndFloat|additionalTypeNull)
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull))
}
// AppendBeginMarker inserts a map start into the dst byte array.
func (Encoder) AppendBeginMarker(dst []byte) []byte {
return append(dst, majorTypeMap|additionalTypeInfiniteCount)
return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount))
}
// AppendEndMarker inserts a map end into the dst byte array.
func (Encoder) AppendEndMarker(dst []byte) []byte {
return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak)
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
}
// AppendObjectData takes an object in form of a byte array and appends to dst.
@ -31,12 +30,12 @@ func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
// AppendArrayStart adds markers to indicate the start of an array.
func (Encoder) AppendArrayStart(dst []byte) []byte {
return append(dst, majorTypeArray|additionalTypeInfiniteCount)
return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount))
}
// AppendArrayEnd adds markers to indicate the end of an array.
func (Encoder) AppendArrayEnd(dst []byte) []byte {
return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak)
return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
}
// AppendArrayDelim adds markers to indicate end of a particular array element.
@ -57,7 +56,7 @@ func (Encoder) AppendBool(dst []byte, val bool) []byte {
if val {
b = additionalTypeBoolTrue
}
return append(dst, majorTypeSimpleAndFloat|b)
return append(dst, byte(majorTypeSimpleAndFloat|b))
}
// AppendBools encodes and inserts an array of boolean values into the dst byte array.
@ -69,7 +68,7 @@ func (e Encoder) AppendBools(dst []byte, vals []bool) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -89,7 +88,7 @@ func (Encoder) AppendInt(dst []byte, val int) []byte {
}
if contentVal <= additionalMax {
lb := byte(contentVal)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
}
@ -105,7 +104,7 @@ func (e Encoder) AppendInts(dst []byte, vals []int) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -129,7 +128,7 @@ func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -153,7 +152,7 @@ func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -177,7 +176,7 @@ func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -197,7 +196,7 @@ func (Encoder) AppendInt64(dst []byte, val int64) []byte {
}
if contentVal <= additionalMax {
lb := byte(contentVal)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
}
@ -213,7 +212,7 @@ func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -237,7 +236,7 @@ func (e Encoder) AppendUints(dst []byte, vals []uint) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -261,7 +260,7 @@ func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -285,7 +284,7 @@ func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -309,7 +308,7 @@ func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -325,9 +324,9 @@ func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
contentVal := val
if contentVal <= additionalMax {
lb := byte(contentVal)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, contentVal)
dst = appendCborTypePrefix(dst, major, uint64(contentVal))
}
return dst
}
@ -341,7 +340,7 @@ func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
@ -352,7 +351,7 @@ func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
}
// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
func (Encoder) AppendFloat32(dst []byte, val float32, unused int) []byte {
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
switch {
case math.IsNaN(float64(val)):
return append(dst, "\xfa\x7f\xc0\x00\x00"...)
@ -368,11 +367,11 @@ func (Encoder) AppendFloat32(dst []byte, val float32, unused int) []byte {
for i := uint(0); i < 4; i++ {
buf[i] = byte(n >> ((3 - i) * 8))
}
return append(append(dst, major|subType), buf[0], buf[1], buf[2], buf[3])
return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3])
}
// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array.
func (e Encoder) AppendFloats32(dst []byte, vals []float32, unused int) []byte {
func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@ -380,18 +379,18 @@ func (e Encoder) AppendFloats32(dst []byte, vals []float32, unused int) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat32(dst, v, unused)
dst = e.AppendFloat32(dst, v)
}
return dst
}
// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
func (Encoder) AppendFloat64(dst []byte, val float64, unused int) []byte {
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
switch {
case math.IsNaN(val):
return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
@ -403,7 +402,7 @@ func (Encoder) AppendFloat64(dst []byte, val float64, unused int) []byte {
major := majorTypeSimpleAndFloat
subType := additionalTypeFloat64
n := math.Float64bits(val)
dst = append(dst, major|subType)
dst = append(dst, byte(major|subType))
for i := uint(1); i <= 8; i++ {
b := byte(n >> ((8 - i) * 8))
dst = append(dst, b)
@ -412,7 +411,7 @@ func (Encoder) AppendFloat64(dst []byte, val float64, unused int) []byte {
}
// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
func (e Encoder) AppendFloats64(dst []byte, vals []float64, unused int) []byte {
func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@ -420,12 +419,12 @@ func (e Encoder) AppendFloats64(dst []byte, vals []float64, unused int) []byte {
}
if l <= additionalMax {
lb := byte(l)
dst = append(dst, major|lb)
dst = append(dst, byte(major|lb))
} else {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat64(dst, v, unused)
dst = e.AppendFloat64(dst, v)
}
return dst
}
@ -439,17 +438,9 @@ func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
return AppendEmbeddedJSON(dst, marshaled)
}
// AppendType appends the parameter type (as a string) to the input byte slice.
func (e Encoder) AppendType(dst []byte, i interface{}) []byte {
if i == nil {
return e.AppendString(dst, "<nil>")
}
return e.AppendString(dst, reflect.TypeOf(i).String())
}
// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6).
func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
return e.AppendBytes(dst, ip)
@ -457,21 +448,21 @@ func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length).
func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8))
dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff))
// Prefix is a tuple (aka MAP of 1 pair of elements) -
// first element is prefix, second is mask length.
dst = append(dst, majorTypeMap|0x1)
dst = append(dst, byte(majorTypeMap|0x1))
dst = e.AppendBytes(dst, pfx.IP)
maskLen, _ := pfx.Mask.Size()
return e.AppendUint8(dst, uint8(maskLen))
}
// AppendMACAddr encodes and inserts a Hardware (MAC) address.
// AppendMACAddr encodes and inserts an Hardware (MAC) address.
func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
return e.AppendBytes(dst, ha)
@ -479,7 +470,7 @@ func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
// AppendHex adds a TAG and inserts a hex bytes as a string.
func (e Encoder) AppendHex(dst []byte, val []byte) []byte {
dst = append(dst, majorTypeTags|additionalTypeIntUint16)
dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
dst = append(dst, byte(additionalTypeTagHexString>>8))
dst = append(dst, byte(additionalTypeTagHexString&0xff))
return e.AppendBytes(dst, val)
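For reference, here is a minimal standalone sketch of the byte-packing pattern that recurs in the hunks above (major|lb for short values, appendCborTypePrefix otherwise), following RFC 8949; the constants and the appendArrayHead helper are illustrative, not taken from the vendored file.

package main

import "fmt"

const (
    majorTypeArray = byte(4 << 5) // CBOR major type 4 (array) in the top 3 bits
    additionalMax  = 23           // values 0..23 fit directly in the low 5 bits
)

// appendArrayHead packs short lengths into the same byte as the major type;
// larger lengths get a marker byte (24 = "1-byte length follows"). This
// sketch only handles lengths up to 255; the real code's appendCborTypePrefix
// also covers the 2/4/8-byte markers 25, 26 and 27.
func appendArrayHead(dst []byte, l int) []byte {
    if l <= additionalMax {
        return append(dst, majorTypeArray|byte(l))
    }
    return append(dst, majorTypeArray|24, byte(l))
}

func main() {
    fmt.Printf("% x\n", appendArrayHead(nil, 3))  // 83
    fmt.Printf("% x\n", appendArrayHead(nil, 30)) // 98 1e
}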

View file

@ -1,9 +1,6 @@
package json
import (
"fmt"
"unicode/utf8"
)
import "unicode/utf8"
const hex = "0123456789abcdef"
@ -37,7 +34,7 @@ func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
//
// The operation loops though each byte in the string looking
// for characters that need json or utf8 encoding. If the string
// does not need encoding, then the string is appended in its
// does not need encoding, then the string is appended in it's
// entirety to the byte slice.
// If we encounter a byte that does need encoding, switch up
// the operation and perform a byte-by-byte read-encode-append.
@ -56,39 +53,14 @@ func (Encoder) AppendString(dst []byte, s string) []byte {
return append(dst, '"')
}
}
// The string has no need for encoding and therefore is directly
// The string has no need for encoding an therefore is directly
// appended to the byte slice.
dst = append(dst, s...)
// End with a double quote
return append(dst, '"')
}
// AppendStringers encodes the provided Stringer list to json and
// appends the encoded Stringer list to the input byte slice.
func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = e.AppendStringer(dst, vals[0])
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = e.AppendStringer(append(dst, ','), val)
}
}
return append(dst, ']')
}
// AppendStringer encodes the input Stringer to json and appends the
// encoded Stringer value to the input byte slice.
func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte {
if val == nil {
return e.AppendInterface(dst, nil)
}
return e.AppendString(dst, val.String())
}
//// appendStringComplex is used by appendString to take over an in
// appendStringComplex is used by appendString to take over an in
// progress JSON string encoding that encountered a character that needs
// to be encoded.
func appendStringComplex(dst []byte, s string, i int) []byte {
@ -99,7 +71,7 @@ func appendStringComplex(dst []byte, s string, i int) []byte {
r, size := utf8.DecodeRuneInString(s[i:])
if r == utf8.RuneError && size == 1 {
// In case of error, first append previous simple characters to
// the byte slice if any and append a replacement character code
// the byte slice if any and append a remplacement character code
// in place of the invalid sequence.
if start < i {
dst = append(dst, s[start:i]...)
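The comments above describe a fast path (append the whole string between quotes) and a slow path (byte-by-byte re-encode). Below is a minimal sketch of the lookup that decides between them, assuming a noEscapeTable-style table like the one in the vendored package; the needsEscaping helper is hypothetical.

package main

import "fmt"

// noEscapeTable marks bytes that can be copied into a JSON string verbatim:
// printable ASCII except the quote and backslash. Control bytes and anything
// >= 0x80 (possible multi-byte UTF-8) fall through to the slow path.
var noEscapeTable [256]bool

func init() {
    for i := 0; i <= 0x7e; i++ {
        noEscapeTable[i] = i >= 0x20 && i != '"' && i != '\\'
    }
}

func needsEscaping(s string) bool {
    for i := 0; i < len(s); i++ {
        if !noEscapeTable[s[i]] {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(needsEscaping("hello world")) // false: fast path
    fmt.Println(needsEscaping("tab\there"))   // true: slow path
}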

View file

@ -10,7 +10,6 @@ const (
timeFormatUnix = ""
timeFormatUnixMs = "UNIXMS"
timeFormatUnixMicro = "UNIXMICRO"
timeFormatUnixNano = "UNIXNANO"
)
// AppendTime formats the input time with the given format
@ -23,8 +22,6 @@ func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte {
return e.AppendInt64(dst, t.UnixNano()/1000000)
case timeFormatUnixMicro:
return e.AppendInt64(dst, t.UnixNano()/1000)
case timeFormatUnixNano:
return e.AppendInt64(dst, t.UnixNano())
}
return append(t.AppendFormat(append(dst, '"'), format), '"')
}
@ -36,11 +33,7 @@ func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte {
case timeFormatUnix:
return appendUnixTimes(dst, vals)
case timeFormatUnixMs:
return appendUnixNanoTimes(dst, vals, 1000000)
case timeFormatUnixMicro:
return appendUnixNanoTimes(dst, vals, 1000)
case timeFormatUnixNano:
return appendUnixNanoTimes(dst, vals, 1)
return appendUnixMsTimes(dst, vals)
}
if len(vals) == 0 {
return append(dst, '[', ']')
@ -71,15 +64,15 @@ func appendUnixTimes(dst []byte, vals []time.Time) []byte {
return dst
}
func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte {
func appendUnixMsTimes(dst []byte, vals []time.Time) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = strconv.AppendInt(dst, vals[0].UnixNano()/div, 10)
dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000000, 10)
if len(vals) > 1 {
for _, t := range vals[1:] {
dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/div, 10)
dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000000, 10)
}
}
dst = append(dst, ']')
@ -88,24 +81,24 @@ func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte {
// AppendDuration formats the input duration with the given unit & format
// and appends the encoded string to the input byte slice.
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
if useInt {
return strconv.AppendInt(dst, int64(d/unit), 10)
}
return e.AppendFloat64(dst, float64(d)/float64(unit), precision)
return e.AppendFloat64(dst, float64(d)/float64(unit))
}
// AppendDurations formats the input durations with the given unit & format
// and appends the encoded string list to the input byte slice.
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = e.AppendDuration(dst, vals[0], unit, useInt, precision)
dst = e.AppendDuration(dst, vals[0], unit, useInt)
if len(vals) > 1 {
for _, d := range vals[1:] {
dst = e.AppendDuration(append(dst, ','), d, unit, useInt, precision)
dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
}
}
dst = append(dst, ']')
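As a reminder of what the signature change above affects: AppendDuration renders either an integer count of units or a float ratio. A minimal sketch of those two modes, with the precision parameter from the left-hand side omitted and appendDur as a hypothetical stand-in.

package main

import (
    "fmt"
    "strconv"
    "time"
)

func appendDur(dst []byte, d, unit time.Duration, useInt bool) []byte {
    if useInt {
        return strconv.AppendInt(dst, int64(d/unit), 10)
    }
    return strconv.AppendFloat(dst, float64(d)/float64(unit), 'f', -1, 64)
}

func main() {
    d := 1500 * time.Millisecond
    fmt.Println(string(appendDur(nil, d, time.Millisecond, true))) // 1500
    fmt.Println(string(appendDur(nil, d, time.Second, false)))     // 1.5
}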

View file

@ -4,7 +4,6 @@ import (
"fmt"
"math"
"net"
"reflect"
"strconv"
)
@ -279,7 +278,7 @@ func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
// AppendUint64 converts the input uint64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
return strconv.AppendUint(dst, val, 10)
return strconv.AppendUint(dst, uint64(val), 10)
}
// AppendUints64 encodes the input uint64s to json and
@ -299,9 +298,9 @@ func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
return dst
}
func appendFloat(dst []byte, val float64, bitSize, precision int) []byte {
func appendFloat(dst []byte, val float64, bitSize int) []byte {
// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
// with an error, but a logging library wants the data to get through so we
// with an error, but a logging library wants the data to get thru so we
// make a tradeoff and store those types as string.
switch {
case math.IsNaN(val):
@ -311,47 +310,26 @@ func appendFloat(dst []byte, val float64, bitSize, precision int) []byte {
case math.IsInf(val, -1):
return append(dst, `"-Inf"`...)
}
// convert as if by es6 number to string conversion
// see also https://cs.opensource.google/go/go/+/refs/tags/go1.20.3:src/encoding/json/encode.go;l=573
strFmt := byte('f')
// If precision is set to a value other than -1, we always just format the float using that precision.
if precision == -1 {
// Use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs := math.Abs(val); abs != 0 {
if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
strFmt = 'e'
}
}
}
dst = strconv.AppendFloat(dst, val, strFmt, precision, bitSize)
if strFmt == 'e' {
// Clean up e-09 to e-9
n := len(dst)
if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
dst[n-2] = dst[n-1]
dst = dst[:n-1]
}
}
return dst
return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
}
// AppendFloat32 converts the input float32 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat32(dst []byte, val float32, precision int) []byte {
return appendFloat(dst, float64(val), 32, precision)
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
return appendFloat(dst, float64(val), 32)
}
// AppendFloats32 encodes the input float32s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats32(dst []byte, vals []float32, precision int) []byte {
func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, float64(vals[0]), 32, precision)
dst = appendFloat(dst, float64(vals[0]), 32)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), float64(val), 32, precision)
dst = appendFloat(append(dst, ','), float64(val), 32)
}
}
dst = append(dst, ']')
@ -360,21 +338,21 @@ func (Encoder) AppendFloats32(dst []byte, vals []float32, precision int) []byte
// AppendFloat64 converts the input float64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat64(dst []byte, val float64, precision int) []byte {
return appendFloat(dst, val, 64, precision)
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
return appendFloat(dst, val, 64)
}
// AppendFloats64 encodes the input float64s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats64(dst []byte, vals []float64, precision int) []byte {
func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, vals[0], 64, precision)
dst = appendFloat(dst, vals[0], 64)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), val, 64, precision)
dst = appendFloat(append(dst, ','), val, 64)
}
}
dst = append(dst, ']')
@ -391,14 +369,6 @@ func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
return append(dst, marshaled...)
}
// AppendType appends the parameter type (as a string) to the input byte slice.
func (e Encoder) AppendType(dst []byte, i interface{}) []byte {
if i == nil {
return e.AppendString(dst, "<nil>")
}
return e.AppendString(dst, reflect.TypeOf(i).String())
}
// AppendObjectData takes in an object that is already in a byte array
// and adds it to the dst.
func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
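The block removed above implements ES6-style number formatting. A minimal standalone sketch reconstructed from those removed lines: 'e' notation only for very small or very large magnitudes, then a leading zero trimmed from the exponent so e-09 becomes e-9 (the NaN/Inf handling from the surrounding code is left out).

package main

import (
    "fmt"
    "math"
    "strconv"
)

func appendES6Float(dst []byte, val float64) []byte {
    format := byte('f')
    if abs := math.Abs(val); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
        format = 'e'
    }
    dst = strconv.AppendFloat(dst, val, format, -1, 64)
    if format == 'e' {
        // Clean up e-09 to e-9, matching encoding/json's behaviour.
        if n := len(dst); n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
            dst[n-2] = dst[n-1]
            dst = dst[:n-1]
        }
    }
    return dst
}

func main() {
    fmt.Println(string(appendES6Float(nil, 0.0000001))) // 1e-7
    fmt.Println(string(appendES6Float(nil, 123.456)))   // 123.456
}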

101 vendor/github.com/rs/zerolog/log.go generated vendored
View file

@ -24,7 +24,7 @@
//
// Sub-loggers let you chain loggers with additional context:
//
// sublogger := log.With().Str("component", "foo").Logger()
// sublogger := log.With().Str("component": "foo").Logger()
// sublogger.Info().Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
//
@ -82,9 +82,8 @@
// log.Warn().Msg("")
// // Output: {"level":"warn","severity":"warn"}
//
// # Caveats
//
// Field duplication:
// Caveats
//
// There is no fields deduplication out-of-the-box.
// Using the same key multiple times creates new key in final JSON each time.
@ -97,30 +96,14 @@
//
// In this case, many consumers will take the last value,
// but this is not guaranteed; check yours if in doubt.
//
// Concurrency safety:
//
// Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger:
//
// func handler(w http.ResponseWriter, r *http.Request) {
// // Create a child logger for concurrency safety
// logger := log.Logger.With().Logger()
//
// // Add context fields, for example User-Agent from HTTP headers
// logger.UpdateContext(func(c zerolog.Context) zerolog.Context {
// ...
// })
// }
package zerolog
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
)
// Level defines log levels.
@ -176,24 +159,24 @@ func (l Level) String() string {
// ParseLevel converts a level string into a zerolog Level value.
// returns an error if the input string does not match known values.
func ParseLevel(levelStr string) (Level, error) {
switch {
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(TraceLevel)):
switch levelStr {
case LevelFieldMarshalFunc(TraceLevel):
return TraceLevel, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(DebugLevel)):
case LevelFieldMarshalFunc(DebugLevel):
return DebugLevel, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(InfoLevel)):
case LevelFieldMarshalFunc(InfoLevel):
return InfoLevel, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(WarnLevel)):
case LevelFieldMarshalFunc(WarnLevel):
return WarnLevel, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(ErrorLevel)):
case LevelFieldMarshalFunc(ErrorLevel):
return ErrorLevel, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(FatalLevel)):
case LevelFieldMarshalFunc(FatalLevel):
return FatalLevel, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(PanicLevel)):
case LevelFieldMarshalFunc(PanicLevel):
return PanicLevel, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(Disabled)):
case LevelFieldMarshalFunc(Disabled):
return Disabled, nil
case strings.EqualFold(levelStr, LevelFieldMarshalFunc(NoLevel)):
case LevelFieldMarshalFunc(NoLevel):
return NoLevel, nil
}
i, err := strconv.Atoi(levelStr)
@ -206,21 +189,6 @@ func ParseLevel(levelStr string) (Level, error) {
return Level(i), nil
}
// UnmarshalText implements encoding.TextUnmarshaler to allow for easy reading from toml/yaml/json formats
func (l *Level) UnmarshalText(text []byte) error {
if l == nil {
return errors.New("can't unmarshal a nil *Level")
}
var err error
*l, err = ParseLevel(string(text))
return err
}
// MarshalText implements encoding.TextMarshaler to allow for easy writing into toml/yaml/json formats
func (l Level) MarshalText() ([]byte, error) {
return []byte(LevelFieldMarshalFunc(l)), nil
}
// A Logger represents an active logging object that generates lines
// of JSON output to an io.Writer. Each logging operation makes a single
// call to the Writer's Write method. There is no guarantee on access
@ -233,7 +201,6 @@ type Logger struct {
context []byte
hooks []Hook
stack bool
ctx context.Context
}
// New creates a root logger with given output writer. If the output writer implements
@ -245,11 +212,11 @@ type Logger struct {
// you may consider using sync wrapper.
func New(w io.Writer) Logger {
if w == nil {
w = io.Discard
w = ioutil.Discard
}
lw, ok := w.(LevelWriter)
if !ok {
lw = LevelWriterAdapter{w}
lw = levelWriterAdapter{w}
}
return Logger{w: lw, level: TraceLevel}
}
@ -291,8 +258,7 @@ func (l Logger) With() Context {
// UpdateContext updates the internal logger's context.
//
// Caution: This method is not concurrency safe.
// Use the With method to create a child logger before modifying the context from concurrent goroutines.
// Use this method with caution. If unsure, prefer the With method.
func (l *Logger) UpdateContext(update func(c Context) Context) {
if l == disabledLogger {
return
@ -325,13 +291,8 @@ func (l Logger) Sample(s Sampler) Logger {
}
// Hook returns a logger with the h Hook.
func (l Logger) Hook(hooks ...Hook) Logger {
if len(hooks) == 0 {
return l
}
newHooks := make([]Hook, len(l.hooks), len(l.hooks)+len(hooks))
copy(newHooks, l.hooks)
l.hooks = append(newHooks, hooks...)
func (l Logger) Hook(h Hook) Logger {
l.hooks = append(l.hooks, h)
return l
}
@ -387,14 +348,7 @@ func (l *Logger) Err(err error) *Event {
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) Fatal() *Event {
return l.newEvent(FatalLevel, func(msg string) {
if closer, ok := l.w.(io.Closer); ok {
// Close the writer to flush any buffered message. Otherwise the message
// will be lost as os.Exit() terminates the program immediately.
closer.Close()
}
os.Exit(1)
})
return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) })
}
// Panic starts a new message with panic level. The panic() function
@ -407,7 +361,7 @@ func (l *Logger) Panic() *Event {
// WithLevel starts a new message with level. Unlike Fatal and Panic
// methods, WithLevel does not terminate the program or stop the ordinary
// flow of a goroutine when used with their respective levels.
// flow of a gourotine when used with their respective levels.
//
// You must call Msg on the returned event in order to send the event.
func (l *Logger) WithLevel(level Level) *Event {
@ -459,14 +413,6 @@ func (l *Logger) Printf(format string, v ...interface{}) {
}
}
// Println sends a log event using debug level and no extra field.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Println(v ...interface{}) {
if e := l.Debug(); e.Enabled() {
e.CallerSkipFrame(1).Msg(fmt.Sprintln(v...))
}
}
// Write implements the io.Writer interface. This is useful to set as a writer
// for the standard library log.
func (l Logger) Write(p []byte) (n int, err error) {
@ -482,15 +428,11 @@ func (l Logger) Write(p []byte) (n int, err error) {
func (l *Logger) newEvent(level Level, done func(string)) *Event {
enabled := l.should(level)
if !enabled {
if done != nil {
done("")
}
return nil
}
e := newEvent(l.w, level)
e.done = done
e.ch = l.hooks
e.ctx = l.ctx
if level != NoLevel && LevelFieldName != "" {
e.Str(LevelFieldName, LevelFieldMarshalFunc(level))
}
@ -505,9 +447,6 @@ func (l *Logger) newEvent(level Level, done func(string)) *Event {
// should returns true if the log event should be logged.
func (l *Logger) should(lvl Level) bool {
if l.w == nil {
return false
}
if lvl < l.level || lvl < GlobalLevel() {
return false
}
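The ParseLevel hunk above replaces exact matching with strings.EqualFold, so level names become case-insensitive, with a numeric fallback. A minimal sketch of that behaviour; the level list, the numbering and the parseLevel helper are assumptions mirroring zerolog's public constants, and the bounds checking of the numeric path is omitted.

package main

import (
    "fmt"
    "strconv"
    "strings"
)

var levelNames = []string{"trace", "debug", "info", "warn", "error", "fatal", "panic"}

// parseLevel accepts "INFO", "info" or a raw integer, mimicking the
// EqualFold matching plus strconv.Atoi fallback shown in the diff.
func parseLevel(s string) (int, error) {
    for i, name := range levelNames {
        if strings.EqualFold(s, name) {
            return i - 1, nil // zerolog numbers trace as -1, debug as 0, info as 1, ...
        }
    }
    return strconv.Atoi(s)
}

func main() {
    fmt.Println(parseLevel("INFO")) // 1 <nil>
    fmt.Println(parseLevel("2"))    // 2 <nil>
    fmt.Println(parseLevel("nope")) // 0 and a strconv error
}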

Binary file not shown (image size before: 116 KiB, after: 82 KiB)

View file

@ -84,7 +84,7 @@ func (s *BurstSampler) Sample(lvl Level) bool {
}
func (s *BurstSampler) inc() uint32 {
now := TimestampFunc().UnixNano()
now := time.Now().UnixNano()
resetAt := atomic.LoadInt64(&s.resetAt)
var c uint32
if now > resetAt {

View file

@ -78,12 +78,3 @@ func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
n = len(p)
return
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (sw syslogWriter) Close() error {
if c, ok := sw.w.(io.Closer); ok {
return c.Close()
}
return nil
}

View file

@ -1,12 +1,7 @@
package zerolog
import (
"bytes"
"io"
"path"
"runtime"
"strconv"
"strings"
"sync"
)
@ -17,25 +12,14 @@ type LevelWriter interface {
WriteLevel(level Level, p []byte) (n int, err error)
}
// LevelWriterAdapter adapts an io.Writer to support the LevelWriter interface.
type LevelWriterAdapter struct {
type levelWriterAdapter struct {
io.Writer
}
// WriteLevel simply writes everything to the adapted writer, ignoring the level.
func (lw LevelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
return lw.Write(p)
}
// Call the underlying writer's Close method if it is an io.Closer. Otherwise
// does nothing.
func (lw LevelWriterAdapter) Close() error {
if closer, ok := lw.Writer.(io.Closer); ok {
return closer.Close()
}
return nil
}
type syncWriter struct {
mu sync.Mutex
lw LevelWriter
@ -49,7 +33,7 @@ func SyncWriter(w io.Writer) io.Writer {
if lw, ok := w.(LevelWriter); ok {
return &syncWriter{lw: lw}
}
return &syncWriter{lw: LevelWriterAdapter{w}}
return &syncWriter{lw: levelWriterAdapter{w}}
}
// Write implements the io.Writer interface.
@ -66,15 +50,6 @@ func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
return s.lw.WriteLevel(l, p)
}
func (s *syncWriter) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
if closer, ok := s.lw.(io.Closer); ok {
return closer.Close()
}
return nil
}
type multiLevelWriter struct {
writers []LevelWriter
}
@ -107,20 +82,6 @@ func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
return n, err
}
// Calls close on all the underlying writers that are io.Closers. If any of the
// Close methods return an error, the remainder of the closers are not closed
// and the error is returned.
func (t multiLevelWriter) Close() error {
for _, w := range t.writers {
if closer, ok := w.(io.Closer); ok {
if err := closer.Close(); err != nil {
return err
}
}
}
return nil
}
// MultiLevelWriter creates a writer that duplicates its writes to all the
// provided writers, similar to the Unix tee(1) command. If some writers
// implement LevelWriter, their WriteLevel method will be used instead of Write.
@ -130,217 +91,8 @@ func MultiLevelWriter(writers ...io.Writer) LevelWriter {
if lw, ok := w.(LevelWriter); ok {
lwriters = append(lwriters, lw)
} else {
lwriters = append(lwriters, LevelWriterAdapter{w})
lwriters = append(lwriters, levelWriterAdapter{w})
}
}
return multiLevelWriter{lwriters}
}
// TestingLog is the logging interface of testing.TB.
type TestingLog interface {
Log(args ...interface{})
Logf(format string, args ...interface{})
Helper()
}
// TestWriter is a writer that writes to testing.TB.
type TestWriter struct {
T TestingLog
// Frame skips caller frames to capture the original file and line numbers.
Frame int
}
// NewTestWriter creates a writer that logs to the testing.TB.
func NewTestWriter(t TestingLog) TestWriter {
return TestWriter{T: t}
}
// Write to testing.TB.
func (t TestWriter) Write(p []byte) (n int, err error) {
t.T.Helper()
n = len(p)
// Strip trailing newline because t.Log always adds one.
p = bytes.TrimRight(p, "\n")
// Try to correct the log file and line number to the caller.
if t.Frame > 0 {
_, origFile, origLine, _ := runtime.Caller(1)
_, frameFile, frameLine, ok := runtime.Caller(1 + t.Frame)
if ok {
erase := strings.Repeat("\b", len(path.Base(origFile))+len(strconv.Itoa(origLine))+3)
t.T.Logf("%s%s:%d: %s", erase, path.Base(frameFile), frameLine, p)
return n, err
}
}
t.T.Log(string(p))
return n, err
}
// ConsoleTestWriter creates an option that correctly sets the file frame depth for testing.TB log.
func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) {
return func(w *ConsoleWriter) {
w.Out = TestWriter{T: t, Frame: 6}
}
}
// FilteredLevelWriter writes only logs at Level or above to Writer.
//
// It should be used only in combination with MultiLevelWriter when you
// want to write to multiple destinations at different levels. Otherwise
// you should just set the level on the logger and filter events early.
// When using MultiLevelWriter then you set the level on the logger to
// the lowest of the levels you use for writers.
type FilteredLevelWriter struct {
Writer LevelWriter
Level Level
}
// Write writes to the underlying Writer.
func (w *FilteredLevelWriter) Write(p []byte) (int, error) {
return w.Writer.Write(p)
}
// WriteLevel calls WriteLevel of the underlying Writer only if the level is equal
// or above the Level.
func (w *FilteredLevelWriter) WriteLevel(level Level, p []byte) (int, error) {
if level >= w.Level {
return w.Writer.WriteLevel(level, p)
}
return len(p), nil
}
var triggerWriterPool = &sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, 1024))
},
}
// TriggerLevelWriter buffers log lines at the ConditionalLevel or below
// until a trigger level (or higher) line is emitted. Log lines with level
// higher than ConditionalLevel are always written out to the destination
// writer. If trigger never happens, buffered log lines are never written out.
//
// It can be used to configure "log level per request".
type TriggerLevelWriter struct {
// Destination writer. If LevelWriter is provided (usually), its WriteLevel is used
// instead of Write.
io.Writer
// ConditionalLevel is the level (and below) at which lines are buffered until
// a trigger level (or higher) line is emitted. Usually this is set to DebugLevel.
ConditionalLevel Level
// TriggerLevel is the lowest level that triggers the sending of the conditional
// level lines. Usually this is set to ErrorLevel.
TriggerLevel Level
buf *bytes.Buffer
triggered bool
mu sync.Mutex
}
func (w *TriggerLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
w.mu.Lock()
defer w.mu.Unlock()
// At first trigger level or above log line, we flush the buffer and change the
// trigger state to triggered.
if !w.triggered && l >= w.TriggerLevel {
err := w.trigger()
if err != nil {
return 0, err
}
}
// Unless triggered, we buffer everything at and below ConditionalLevel.
if !w.triggered && l <= w.ConditionalLevel {
if w.buf == nil {
w.buf = triggerWriterPool.Get().(*bytes.Buffer)
}
// We prefix each log line with a byte with the level.
// Hopefully we will never have a level value which equals a newline
// (which could interfere with reconstruction of log lines in the trigger method).
w.buf.WriteByte(byte(l))
w.buf.Write(p)
return len(p), nil
}
// Anything above ConditionalLevel is always passed through.
// Once triggered, everything is passed through.
if lw, ok := w.Writer.(LevelWriter); ok {
return lw.WriteLevel(l, p)
}
return w.Write(p)
}
// trigger expects lock to be held.
func (w *TriggerLevelWriter) trigger() error {
if w.triggered {
return nil
}
w.triggered = true
if w.buf == nil {
return nil
}
p := w.buf.Bytes()
for len(p) > 0 {
// We do not use bufio.Scanner here because we already have full buffer
// in the memory and we do not want extra copying from the buffer to
// scanner's token slice, nor we want to hit scanner's token size limit,
// and we also want to preserve newlines.
i := bytes.IndexByte(p, '\n')
line := p[0 : i+1]
p = p[i+1:]
// We prefixed each log line with a byte with the level.
level := Level(line[0])
line = line[1:]
var err error
if lw, ok := w.Writer.(LevelWriter); ok {
_, err = lw.WriteLevel(level, line)
} else {
_, err = w.Write(line)
}
if err != nil {
return err
}
}
return nil
}
// Trigger forces flushing the buffer and change the trigger state to
// triggered, if the writer has not already been triggered before.
func (w *TriggerLevelWriter) Trigger() error {
w.mu.Lock()
defer w.mu.Unlock()
return w.trigger()
}
// Close closes the writer and returns the buffer to the pool.
func (w *TriggerLevelWriter) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.buf == nil {
return nil
}
// We return the buffer only if it has not grown above the limit.
// This prevents accumulation of large buffers in the pool just
// because occasionally a large buffer might be needed.
if w.buf.Cap() <= TriggerLevelWriterBufferReuseLimit {
w.buf.Reset()
triggerWriterPool.Put(w.buf)
}
w.buf = nil
return nil
}
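Most of this file revolves around the LevelWriter interface and the adapter that lets a plain io.Writer satisfy it. A minimal sketch of that idea, independent of zerolog; the Level type here is a bare stand-in.

package main

import (
    "fmt"
    "io"
    "os"
)

type Level int8

// LevelWriter lets a destination react to the log level of each line.
type LevelWriter interface {
    io.Writer
    WriteLevel(level Level, p []byte) (n int, err error)
}

// levelWriterAdapter mirrors the adapter in the diff: it ignores the level
// and forwards everything to the embedded io.Writer.
type levelWriterAdapter struct {
    io.Writer
}

func (lw levelWriterAdapter) WriteLevel(_ Level, p []byte) (int, error) {
    return lw.Write(p)
}

func main() {
    var lw LevelWriter = levelWriterAdapter{os.Stdout}
    n, err := lw.WriteLevel(1, []byte("hello level writer\n"))
    fmt.Println(n, err)
}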

View file

@ -221,15 +221,15 @@ var fsTypeMap = map[int64]string{
func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) {
useMounts := false
filename := common.HostProc("1/mountinfo")
filename := common.HostProc("self/mountinfo")
lines, err := common.ReadLines(filename)
if err != nil {
if err != err.(*os.PathError) {
return nil, err
}
// if kernel does not support 1/mountinfo, fallback to 1/mounts (<2.6.26)
// if kernel does not support self/mountinfo, fallback to self/mounts (<2.6.26)
useMounts = true
filename = common.HostProc("1/mounts")
filename = common.HostProc("self/mounts")
lines, err = common.ReadLines(filename)
if err != nil {
return nil, err
@ -261,7 +261,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro
}
}
} else {
// a line of 1/mountinfo has the following structure:
// a line of self/mountinfo has the following structure:
// 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
// (1) (2) (3) (4) (5) (6) (7) (8) (9) (10) (11)
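The comment above documents the mountinfo line layout. A minimal sketch of splitting such a line at the " - " separator, using the example line from that comment; parseMountinfo is a hypothetical helper and error handling is reduced to a bool.

package main

import (
    "fmt"
    "strings"
)

func parseMountinfo(line string) (mountPoint, fsType, source string, ok bool) {
    // Everything before " - " is the fixed fields plus optional tags;
    // everything after is "fstype source superblock-options".
    parts := strings.SplitN(line, " - ", 2)
    if len(parts) != 2 {
        return "", "", "", false
    }
    fields := strings.Fields(parts[0])
    post := strings.Fields(parts[1])
    if len(fields) < 5 || len(post) < 2 {
        return "", "", "", false
    }
    return fields[4], post[0], post[1], true
}

func main() {
    line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"
    fmt.Println(parseMountinfo(line)) // /mnt2 ext3 /dev/root true
}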

View file

@ -253,7 +253,7 @@ func Write(w io.Writer, order ByteOrder, data interface{}) error {
b[0] = *v
case uint8:
bs = b[:1]
b[0] = v
b[0] = byte(v)
case []uint8:
bs = v
case *int16:

View file

@ -94,7 +94,7 @@ func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...
var ErrNotImplementedError = errors.New("not implemented yet")
// ReadFile reads contents from a file.
// ReadFile reads contents from a file
func ReadFile(filename string) (string, error) {
content, err := ioutil.ReadFile(filename)
@ -111,7 +111,7 @@ func ReadLines(filename string) ([]string, error) {
return ReadLinesOffsetN(filename, 0, -1)
}
// ReadLinesOffsetN reads contents from file and splits them by new line.
// ReadLines reads contents from file and splits them by new line.
// The offset tells at which line number to start.
// The count determines the number of lines to read (starting from offset):
// n >= 0: at most n lines
@ -165,7 +165,7 @@ func UintToString(orig []uint8) string {
size = i
break
}
ret[i] = o
ret[i] = byte(o)
}
if size == -1 {
size = len(orig)
@ -224,31 +224,31 @@ func ReadInts(filename string) ([]int64, error) {
return ret, nil
}
// HexToUint32 parses Hex to uint32 without error.
// Parse Hex to uint32 without error
func HexToUint32(hex string) uint32 {
vv, _ := strconv.ParseUint(hex, 16, 32)
return uint32(vv)
}
// mustParseInt32 parses to int32 without error.
// Parse to int32 without error
func mustParseInt32(val string) int32 {
vv, _ := strconv.ParseInt(val, 10, 32)
return int32(vv)
}
// mustParseUint64 parses to uint64 without error.
// Parse to uint64 without error
func mustParseUint64(val string) uint64 {
vv, _ := strconv.ParseInt(val, 10, 64)
return uint64(vv)
}
// mustParseFloat64 parses to Float64 without error.
// Parse to Float64 without error
func mustParseFloat64(val string) float64 {
vv, _ := strconv.ParseFloat(val, 64)
return vv
}
// StringsHas checks the target string slice contains src or not.
// StringsHas checks the target string slice contains src or not
func StringsHas(target []string, src string) bool {
for _, t := range target {
if strings.TrimSpace(t) == src {
@ -258,7 +258,7 @@ func StringsHas(target []string, src string) bool {
return false
}
// StringsContains checks the src in any string of the target string slice.
// StringsContains checks the src in any string of the target string slice
func StringsContains(target []string, src string) bool {
for _, t := range target {
if strings.Contains(t, src) {
@ -308,7 +308,7 @@ func PathExists(filename string) bool {
return false
}
// GetEnv retrieves the environment variable key. If it does not exist it returns the default.
//GetEnv retrieves the environment variable key. If it does not exist it returns the default.
func GetEnv(key string, dfault string, combineWith ...string) string {
value := os.Getenv(key)
if value == "" {
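The ReadLinesOffsetN doc comment above defines the offset/count semantics. A minimal sketch of those semantics applied to an in-memory slice, so the example needs no file on disk; linesOffsetN is a hypothetical stand-in.

package main

import "fmt"

// linesOffsetN starts at line number offset and returns at most n lines;
// a negative n means "all remaining lines", matching the doc comment.
func linesOffsetN(lines []string, offset uint, n int) []string {
    var out []string
    for i := int(offset); i < len(lines); i++ {
        if n >= 0 && len(out) >= n {
            break
        }
        out = append(out, lines[i])
    }
    return out
}

func main() {
    all := []string{"a", "b", "c", "d"}
    fmt.Println(linesOffsetN(all, 1, 2))  // [b c]
    fmt.Println(linesOffsetN(all, 0, -1)) // [a b c d]
}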

View file

@ -26,8 +26,8 @@ func DoSysctrl(mib string) ([]string, error) {
return []string{}, err
}
v := strings.Replace(string(out), "{ ", "", 1)
v = strings.Replace(v, " }", "", 1)
values := strings.Fields(v)
v = strings.Replace(string(v), " }", "", 1)
values := strings.Fields(string(v))
return values, nil
}
@ -55,6 +55,7 @@ func NumProcs() (uint64, error) {
}
func BootTimeWithContext(ctx context.Context) (uint64, error) {
system, role, err := Virtualization()
if err != nil {
return 0, err
@ -75,18 +76,6 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) {
return 0, err
}
if statFile == "uptime" {
if len(lines) != 1 {
return 0, fmt.Errorf("wrong uptime format")
}
f := strings.Fields(lines[0])
b, err := strconv.ParseFloat(f[0], 64)
if err != nil {
return 0, err
}
t := uint64(time.Now().Unix()) - uint64(b)
return t, nil
}
if statFile == "stat" {
for _, line := range lines {
if strings.HasPrefix(line, "btime") {
@ -102,6 +91,17 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) {
return t, nil
}
}
} else if statFile == "uptime" {
if len(lines) != 1 {
return 0, fmt.Errorf("wrong uptime format")
}
f := strings.Fields(lines[0])
b, err := strconv.ParseFloat(f[0], 64)
if err != nil {
return 0, err
}
t := uint64(time.Now().Unix()) - uint64(b)
return t, nil
}
return 0, fmt.Errorf("could not find btime")
@ -111,7 +111,7 @@ func Virtualization() (string, string, error) {
return VirtualizationWithContext(context.Background())
}
// required variables for concurrency safe virtualization caching.
// required variables for concurrency safe virtualization caching
var (
cachedVirtMap map[string]string
cachedVirtMutex sync.RWMutex
@ -137,27 +137,28 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
if PathExists(filepath.Join(filename, "capabilities")) {
contents, err := ReadLines(filepath.Join(filename, "capabilities"))
if err == nil && StringsContains(contents, "control_d") {
if err == nil {
if StringsContains(contents, "control_d") {
role = "host"
}
}
}
}
filename = HostProc("modules")
if PathExists(filename) {
contents, err := ReadLines(filename)
if err == nil {
switch {
case StringsContains(contents, "kvm"):
if StringsContains(contents, "kvm") {
system = "kvm"
role = "host"
case StringsContains(contents, "vboxdrv"):
} else if StringsContains(contents, "vboxdrv") {
system = "vbox"
role = "host"
case StringsContains(contents, "vboxguest"):
} else if StringsContains(contents, "vboxguest") {
system = "vbox"
role = "guest"
case StringsContains(contents, "vmware"):
} else if StringsContains(contents, "vmware") {
system = "vmware"
role = "guest"
}
@ -200,6 +201,7 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
if PathExists(filepath.Join(filename, "self", "status")) {
contents, err := ReadLines(filepath.Join(filename, "self", "status"))
if err == nil {
if StringsContains(contents, "s_context:") ||
StringsContains(contents, "VxID:") {
system = "linux-vserver"
@ -222,17 +224,16 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) {
if PathExists(filepath.Join(filename, "self", "cgroup")) {
contents, err := ReadLines(filepath.Join(filename, "self", "cgroup"))
if err == nil {
switch {
case StringsContains(contents, "lxc"):
if StringsContains(contents, "lxc") {
system = "lxc"
role = "guest"
case StringsContains(contents, "docker"):
} else if StringsContains(contents, "docker") {
system = "docker"
role = "guest"
case StringsContains(contents, "machine-rkt"):
} else if StringsContains(contents, "machine-rkt") {
system = "rkt"
role = "guest"
case PathExists("/usr/bin/lxc-version"):
} else if PathExists("/usr/bin/lxc-version") {
system = "lxc"
role = "host"
}
@ -280,7 +281,7 @@ func GetOSRelease() (platform string, version string, err error) {
return platform, version, nil
}
// trimQuotes removes quotes in the source string.
// Remove quotes of the source string
func trimQuotes(s string) string {
if len(s) >= 2 {
if s[0] == '"' && s[len(s)-1] == '"' {

View file

@ -41,7 +41,8 @@ func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ..
}
func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) {
cmd := []string{"-P", strconv.Itoa(int(pid))}
var cmd []string
cmd = []string{"-P", strconv.Itoa(int(pid))}
pgrep, err := exec.LookPath("pgrep")
if err != nil {
return []int32{}, err

View file

@ -6,12 +6,11 @@ import (
"context"
"fmt"
"path/filepath"
"reflect"
"strings"
"syscall"
"unsafe"
"github.com/yusufpapurcu/wmi"
"github.com/StackExchange/wmi"
"golang.org/x/sys/windows"
)
@ -49,18 +48,11 @@ const (
PDH_INVALID_DATA = 0xc0000bc6
PDH_INVALID_HANDLE = 0xC0000bbc
PDH_NO_DATA = 0x800007d5
STATUS_BUFFER_OVERFLOW = 0x80000005
STATUS_BUFFER_TOO_SMALL = 0xC0000023
STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
)
const (
ProcessBasicInformation = 0
ProcessWow64Information = 26
ProcessQueryInformation = windows.PROCESS_DUP_HANDLE | windows.PROCESS_QUERY_INFORMATION
SystemExtendedHandleInformationClass = 64
)
var (
@ -235,66 +227,3 @@ func ConvertDOSPath(p string) string {
}
return p
}
type NtStatus uint32
func (s NtStatus) Error() error {
if s == 0 {
return nil
}
return fmt.Errorf("NtStatus 0x%08x", uint32(s))
}
func (s NtStatus) IsError() bool {
return s>>30 == 3
}
type SystemExtendedHandleTableEntryInformation struct {
Object uintptr
UniqueProcessId uintptr
HandleValue uintptr
GrantedAccess uint32
CreatorBackTraceIndex uint16
ObjectTypeIndex uint16
HandleAttributes uint32
Reserved uint32
}
type SystemExtendedHandleInformation struct {
NumberOfHandles uintptr
Reserved uintptr
Handles [1]SystemExtendedHandleTableEntryInformation
}
// CallWithExpandingBuffer https://github.com/hillu/go-ntdll
func CallWithExpandingBuffer(fn func() NtStatus, buf *[]byte, resultLength *uint32) NtStatus {
for {
if st := fn(); st == STATUS_BUFFER_OVERFLOW || st == STATUS_BUFFER_TOO_SMALL || st == STATUS_INFO_LENGTH_MISMATCH {
if int(*resultLength) <= cap(*buf) {
(*reflect.SliceHeader)(unsafe.Pointer(buf)).Len = int(*resultLength)
} else {
*buf = make([]byte, int(*resultLength))
}
continue
} else {
if !st.IsError() {
*buf = (*buf)[:int(*resultLength)]
}
return st
}
}
}
func NtQuerySystemInformation(
SystemInformationClass uint32,
SystemInformation *byte,
SystemInformationLength uint32,
ReturnLength *uint32,
) NtStatus {
r0, _, _ := ProcNtQuerySystemInformation.Call(
uintptr(SystemInformationClass),
uintptr(unsafe.Pointer(SystemInformation)),
uintptr(SystemInformationLength),
uintptr(unsafe.Pointer(ReturnLength)))
return NtStatus(r0)
}

View file

@ -1,6 +0,0 @@
wmi
===
Package wmi provides a WQL interface to Windows WMI.
Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.

3 vendor/golang.org/x/sys/AUTHORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3 vendor/golang.org/x/sys/CONTRIBUTORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View file

@ -0,0 +1,30 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package unsafeheader contains header declarations for the Go runtime's
// slice and string implementations.
//
// This package allows x/sys to use types equivalent to
// reflect.SliceHeader and reflect.StringHeader without introducing
// a dependency on the (relatively heavy) "reflect" package.
package unsafeheader
import (
"unsafe"
)
// Slice is the runtime representation of a slice.
// It cannot be used safely or portably and its representation may change in a later release.
type Slice struct {
Data unsafe.Pointer
Len int
Cap int
}
// String is the runtime representation of a string.
// It cannot be used safely or portably and its representation may change in a later release.
type String struct {
Data unsafe.Pointer
Len int
}
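A minimal sketch of what the Slice header above describes: the same three words the runtime stores for a slice, so a *[]byte can be reinterpreted without importing reflect. Illustrative only; newer Go releases also offer unsafe.Slice and unsafe.String for building such values safely.

package main

import (
    "fmt"
    "unsafe"
)

// Slice mirrors the header declared in the vendored file above.
type Slice struct {
    Data unsafe.Pointer
    Len  int
    Cap  int
}

func main() {
    b := []byte("hello")
    h := (*Slice)(unsafe.Pointer(&b))
    fmt.Println(h.Len, h.Data == unsafe.Pointer(&b[0])) // 5 true
}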

View file

@ -149,7 +149,7 @@ To add a constant, add the header that includes it to the appropriate variable.
Then, edit the regex (if necessary) to match the desired constant. Avoid making
the regex too broad to avoid matching unintended constants.
### internal/mkmerge
### mkmerge.go
This program is used to extract duplicate const, func, and type declarations
from the generated architecture-specific files listed below, and merge these

View file

@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
// +build go1.9
package unix

View file

@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
//go:build gc
// +build gc
#include "textflag.h"
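The pairs of constraint lines in these assembly hunks are the Go 1.17 //go:build syntax next to the legacy // +build form: within a legacy line the terms are ORed, while separate legacy lines are ANDed. A minimal Go file header carrying the same constraint, with a hypothetical package name:

//go:build (freebsd || netbsd || openbsd) && gc
// +build freebsd netbsd openbsd
// +build gc

package unix_sketch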

View file

@ -3,6 +3,8 @@
// license that can be found in the LICENSE file.
//go:build (freebsd || netbsd || openbsd) && gc
// +build freebsd netbsd openbsd
// +build gc
#include "textflag.h"

View file

@ -3,6 +3,8 @@
// license that can be found in the LICENSE file.
//go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc
// +build darwin dragonfly freebsd netbsd openbsd
// +build gc
#include "textflag.h"

View file

@ -3,6 +3,8 @@
// license that can be found in the LICENSE file.
//go:build (freebsd || netbsd || openbsd) && gc
// +build freebsd netbsd openbsd
// +build gc
#include "textflag.h"

View file

@ -3,6 +3,8 @@
// license that can be found in the LICENSE file.
//go:build (darwin || freebsd || netbsd || openbsd) && gc
// +build darwin freebsd netbsd openbsd
// +build gc
#include "textflag.h"

View file

@ -1,29 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (darwin || freebsd || netbsd || openbsd) && gc
#include "textflag.h"
//
// System call support for ppc64, BSD
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)

Some files were not shown because too many files have changed in this diff.