fix auth; update deps
vendor/github.com/gabriel-vasile/mimetype/LICENSE | 2 (generated, vendored)
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2018-2020 Gabriel Vasile
Copyright (c) 2018 Gabriel Vasile

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
vendor/github.com/gabriel-vasile/mimetype/README.md | 3 (generated, vendored)
@@ -16,9 +16,6 @@
<a href="https://goreportcard.com/report/github.com/gabriel-vasile/mimetype">
<img alt="Go report card" src="https://goreportcard.com/badge/github.com/gabriel-vasile/mimetype">
</a>
<a href="https://codecov.io/gh/gabriel-vasile/mimetype">
<img alt="Code coverage" src="https://codecov.io/gh/gabriel-vasile/mimetype/branch/master/graph/badge.svg?token=qcfJF1kkl2"/>
</a>
<a href="LICENSE">
<img alt="License" src="https://img.shields.io/badge/License-MIT-green.svg">
</a>
vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go | 119 (generated, vendored)
@@ -3,6 +3,7 @@ package magic
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -74,51 +75,87 @@ func CRX(raw []byte, limit uint32) bool {
|
||||
}
|
||||
|
||||
// Tar matches a (t)ape (ar)chive file.
|
||||
// Tar files are divided into 512 bytes records. First record contains a 257
|
||||
// bytes header padded with NUL.
|
||||
func Tar(raw []byte, _ uint32) bool {
|
||||
// The "magic" header field for files in in UStar (POSIX IEEE P1003.1) archives
|
||||
// has the prefix "ustar". The values of the remaining bytes in this field vary
|
||||
// by archiver implementation.
|
||||
if len(raw) >= 512 && bytes.HasPrefix(raw[257:], []byte{0x75, 0x73, 0x74, 0x61, 0x72}) {
|
||||
return true
|
||||
}
|
||||
const sizeRecord = 512
|
||||
|
||||
if len(raw) < 256 {
|
||||
// The structure of a tar header:
|
||||
// type TarHeader struct {
|
||||
// Name [100]byte
|
||||
// Mode [8]byte
|
||||
// Uid [8]byte
|
||||
// Gid [8]byte
|
||||
// Size [12]byte
|
||||
// Mtime [12]byte
|
||||
// Chksum [8]byte
|
||||
// Linkflag byte
|
||||
// Linkname [100]byte
|
||||
// Magic [8]byte
|
||||
// Uname [32]byte
|
||||
// Gname [32]byte
|
||||
// Devmajor [8]byte
|
||||
// Devminor [8]byte
|
||||
// }
|
||||
|
||||
if len(raw) < sizeRecord {
|
||||
return false
|
||||
}
|
||||
raw = raw[:sizeRecord]
|
||||
|
||||
// First 100 bytes of the header represent the file name.
|
||||
// Check if file looks like Gentoo GLEP binary package.
|
||||
if bytes.Contains(raw[:100], []byte("/gpkg-1\x00")) {
|
||||
return false
|
||||
}
|
||||
|
||||
// The older v7 format has no "magic" field, and therefore must be identified
|
||||
// with heuristics based on legal ranges of values for other header fields:
|
||||
// https://www.nationalarchives.gov.uk/PRONOM/Format/proFormatSearch.aspx?status=detailReport&id=385&strPageToDisplay=signatures
|
||||
rules := []struct {
|
||||
min, max uint8
|
||||
i int
|
||||
}{
|
||||
{0x21, 0xEF, 0},
|
||||
{0x30, 0x37, 105},
|
||||
{0x20, 0x37, 106},
|
||||
{0x00, 0x00, 107},
|
||||
{0x30, 0x37, 113},
|
||||
{0x20, 0x37, 114},
|
||||
{0x00, 0x00, 115},
|
||||
{0x30, 0x37, 121},
|
||||
{0x20, 0x37, 122},
|
||||
{0x00, 0x00, 123},
|
||||
{0x30, 0x37, 134},
|
||||
{0x30, 0x37, 146},
|
||||
{0x30, 0x37, 153},
|
||||
{0x00, 0x37, 154},
|
||||
// Get the checksum recorded into the file.
|
||||
recsum, err := tarParseOctal(raw[148:156])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
for _, r := range rules {
|
||||
if raw[r.i] < r.min || raw[r.i] > r.max {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, i := range []uint8{135, 147, 155} {
|
||||
if raw[i] != 0x00 && raw[i] != 0x20 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
sum1, sum2 := tarChksum(raw)
|
||||
return recsum == sum1 || recsum == sum2
|
||||
}
|
||||
|
||||
// tarParseOctal converts octal string to decimal int.
|
||||
func tarParseOctal(b []byte) (int64, error) {
|
||||
// Because unused fields are filled with NULs, we need to skip leading NULs.
|
||||
// Fields may also be padded with spaces or NULs.
|
||||
// So we remove leading and trailing NULs and spaces to be sure.
|
||||
b = bytes.Trim(b, " \x00")
|
||||
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
x, err := strconv.ParseUint(tarParseString(b), 8, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int64(x), nil
|
||||
}
|
||||
|
||||
// tarParseString converts a NUL ended bytes slice to a string.
|
||||
func tarParseString(b []byte) string {
|
||||
if i := bytes.IndexByte(b, 0); i >= 0 {
|
||||
return string(b[:i])
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// tarChksum computes the checksum for the header block b.
|
||||
// The actual checksum is written to same b block after it has been calculated.
|
||||
// Before calculation the bytes from b reserved for checksum have placeholder
|
||||
// value of ASCII space 0x20.
|
||||
// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
|
||||
// signed byte values. We compute and return both.
|
||||
func tarChksum(b []byte) (unsigned, signed int64) {
|
||||
for i, c := range b {
|
||||
if 148 <= i && i < 156 {
|
||||
c = ' ' // Treat the checksum field itself as all spaces.
|
||||
}
|
||||
unsigned += int64(c)
|
||||
signed += int64(int8(c))
|
||||
}
|
||||
return unsigned, signed
|
||||
}
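For reference, the helpers above (tarParseOctal, tarChksum) are unexported, so callers only reach them through the package-level detection API. A minimal, hedged sketch of how the new v7 checksum fallback gets exercised; the file path and the package main scaffolding are illustrative, not part of this change:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/gabriel-vasile/mimetype"
)

func main() {
	// A pre-POSIX (v7) tar carries no "ustar" magic at offset 257, so detection
	// falls back to the header checksum comparison implemented above.
	buf, err := os.ReadFile("fixtures/legacy-v7.tar") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	mtype := mimetype.Detect(buf)
	fmt.Println(mtype.Is("application/x-tar"), mtype.String())
}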
|
||||
|
||||
vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go | 5 (generated, vendored)
@@ -153,8 +153,11 @@ func ftyp(sigs ...[]byte) Detector {
|
||||
if len(raw) < 12 {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(raw[4:8], []byte("ftyp")) {
|
||||
return false
|
||||
}
|
||||
for _, s := range sigs {
|
||||
if bytes.Equal(raw[4:12], append([]byte("ftyp"), s...)) {
|
||||
if bytes.Equal(raw[8:12], s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go | 44 (generated, vendored)
@@ -1,7 +1,6 @@
|
||||
package magic
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -234,9 +233,10 @@ func GeoJSON(raw []byte, limit uint32) bool {
|
||||
// types.
|
||||
func NdJSON(raw []byte, limit uint32) bool {
|
||||
lCount, hasObjOrArr := 0, false
|
||||
sc := bufio.NewScanner(dropLastLine(raw, limit))
|
||||
for sc.Scan() {
|
||||
l := sc.Bytes()
|
||||
raw = dropLastLine(raw, limit)
|
||||
var l []byte
|
||||
for len(raw) != 0 {
|
||||
l, raw = scanLine(raw)
|
||||
// Empty lines are allowed in NDJSON.
|
||||
if l = trimRWS(trimLWS(l)); len(l) == 0 {
|
||||
continue
|
||||
@@ -301,20 +301,15 @@ func Svg(raw []byte, limit uint32) bool {
|
||||
}
|
||||
|
||||
// Srt matches a SubRip file.
|
||||
func Srt(in []byte, _ uint32) bool {
|
||||
s := bufio.NewScanner(bytes.NewReader(in))
|
||||
if !s.Scan() {
|
||||
return false
|
||||
}
|
||||
// First line must be 1.
|
||||
if s.Text() != "1" {
|
||||
return false
|
||||
}
|
||||
func Srt(raw []byte, _ uint32) bool {
|
||||
line, raw := scanLine(raw)
|
||||
|
||||
if !s.Scan() {
|
||||
// First line must be 1.
|
||||
if string(line) != "1" {
|
||||
return false
|
||||
}
|
||||
secondLine := s.Text()
|
||||
line, raw = scanLine(raw)
|
||||
secondLine := string(line)
|
||||
// Timestamp format (e.g: 00:02:16,612 --> 00:02:19,376) limits secondLine
|
||||
// length to exactly 29 characters.
|
||||
if len(secondLine) != 29 {
|
||||
@@ -325,14 +320,12 @@ func Srt(in []byte, _ uint32) bool {
|
||||
if strings.Contains(secondLine, ".") {
|
||||
return false
|
||||
}
|
||||
// For Go <1.17, comma is not recognised as a decimal separator by `time.Parse`.
|
||||
secondLine = strings.ReplaceAll(secondLine, ",", ".")
|
||||
// Second line must be a time range.
|
||||
ts := strings.Split(secondLine, " --> ")
|
||||
if len(ts) != 2 {
|
||||
return false
|
||||
}
|
||||
const layout = "15:04:05.000"
|
||||
const layout = "15:04:05,000"
|
||||
t0, err := time.Parse(layout, ts[0])
|
||||
if err != nil {
|
||||
return false
|
||||
@@ -345,8 +338,9 @@ func Srt(in []byte, _ uint32) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
line, _ = scanLine(raw)
|
||||
// A third line must exist and not be empty. This is the actual subtitle text.
|
||||
return s.Scan() && len(s.Bytes()) != 0
|
||||
return len(line) != 0
|
||||
}
|
||||
|
||||
// Vtt matches a Web Video Text Tracks (WebVTT) file. See
|
||||
@@ -373,3 +367,15 @@ func Vtt(raw []byte, limit uint32) bool {
|
||||
return bytes.Equal(raw, []byte{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) || // UTF-8 BOM and "WEBVTT"
|
||||
bytes.Equal(raw, []byte{0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) // "WEBVTT"
|
||||
}
|
||||
|
||||
// dropCR drops a terminal \r from the data.
|
||||
func dropCR(data []byte) []byte {
|
||||
if len(data) > 0 && data[len(data)-1] == '\r' {
|
||||
return data[0 : len(data)-1]
|
||||
}
|
||||
return data
|
||||
}
|
||||
func scanLine(b []byte) (line, remainder []byte) {
|
||||
line, remainder, _ = bytes.Cut(b, []byte("\n"))
|
||||
return dropCR(line), remainder
|
||||
}
|
||||
|
||||
vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go | 22 (generated, vendored)
@@ -18,7 +18,7 @@ func Tsv(raw []byte, limit uint32) bool {
|
||||
}
|
||||
|
||||
func sv(in []byte, comma rune, limit uint32) bool {
|
||||
r := csv.NewReader(dropLastLine(in, limit))
|
||||
r := csv.NewReader(bytes.NewReader(dropLastLine(in, limit)))
|
||||
r.Comma = comma
|
||||
r.ReuseRecord = true
|
||||
r.LazyQuotes = true
|
||||
@@ -44,20 +44,14 @@ func sv(in []byte, comma rune, limit uint32) bool {
|
||||
// mimetype limits itself to ReadLimit bytes when performing a detection.
|
||||
// This means, for file formats like CSV or NDJSON, the last line of the input
|
||||
// can be an incomplete line.
|
||||
func dropLastLine(b []byte, cutAt uint32) io.Reader {
|
||||
if cutAt == 0 {
|
||||
return bytes.NewReader(b)
|
||||
func dropLastLine(b []byte, readLimit uint32) []byte {
|
||||
if readLimit == 0 || uint32(len(b)) < readLimit {
|
||||
return b
|
||||
}
|
||||
if uint32(len(b)) >= cutAt {
|
||||
for i := cutAt - 1; i > 0; i-- {
|
||||
if b[i] == '\n' {
|
||||
return bytes.NewReader(b[:i])
|
||||
}
|
||||
for i := len(b) - 1; i > 0; i-- {
|
||||
if b[i] == '\n' {
|
||||
return b[:i]
|
||||
}
|
||||
|
||||
// No newline was found between the 0 index and cutAt.
|
||||
return bytes.NewReader(b[:cutAt])
|
||||
}
|
||||
|
||||
return bytes.NewReader(b)
|
||||
return b
|
||||
}
|
||||
|
||||
vendor/github.com/gabriel-vasile/mimetype/mimetype.go | 8 (generated, vendored)
@@ -7,14 +7,15 @@ package mimetype
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
var defaultLimit uint32 = 3072
|
||||
|
||||
// readLimit is the maximum number of bytes from the input used when detecting.
|
||||
var readLimit uint32 = 3072
|
||||
var readLimit uint32 = defaultLimit
|
||||
|
||||
// Detect returns the MIME type found from the provided byte slice.
|
||||
//
|
||||
@@ -48,7 +49,7 @@ func DetectReader(r io.Reader) (*MIME, error) {
|
||||
// Using atomic because readLimit can be written at the same time in other goroutine.
|
||||
l := atomic.LoadUint32(&readLimit)
|
||||
if l == 0 {
|
||||
in, err = ioutil.ReadAll(r)
|
||||
in, err = io.ReadAll(r)
|
||||
if err != nil {
|
||||
return errMIME, err
|
||||
}
|
||||
@@ -103,6 +104,7 @@ func EqualsAny(s string, mimes ...string) bool {
|
||||
// SetLimit sets the maximum number of bytes read from input when detecting the MIME type.
|
||||
// Increasing the limit provides better detection for file formats which store
|
||||
// their magical numbers towards the end of the file: docx, pptx, xlsx, etc.
|
||||
// During detection data is read in a single block of size limit, i.e. it is not buffered.
|
||||
// A limit of 0 means the whole input file will be used.
|
||||
func SetLimit(limit uint32) {
|
||||
// Using atomic because readLimit can be read at the same time in other goroutine.
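SetLimit and DetectReader keep their public signatures here; only the internal handling of the read limit changes. A hedged usage sketch (the CSV path is illustrative):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/gabriel-vasile/mimetype"
)

func main() {
	// Read at most 8 KiB per detection; 0 would read the whole input.
	mimetype.SetLimit(8192)

	f, err := os.Open("testdata/sample.csv") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// DetectReader drains at most the configured limit from f; the CSV/NDJSON
	// detectors then drop the possibly truncated last line (dropLastLine above).
	mtype, err := mimetype.DetectReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(mtype.String(), mtype.Extension())
}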
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/CHANGELOG.md | 29 (generated, vendored)
@@ -1,5 +1,34 @@
|
||||
# Changelog
|
||||
|
||||
## 0.28.1
|
||||
|
||||
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.28.1.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Implement `http.ResponseWriter` to hook into various parts of the response process ([#837](https://github.com/getsentry/sentry-go/pull/837))
|
||||
|
||||
## 0.28.0
|
||||
|
||||
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.28.0.
|
||||
|
||||
### Features
|
||||
|
||||
- Add a `Fiber` performance tracing & error reporting integration ([#795](https://github.com/getsentry/sentry-go/pull/795))
|
||||
- Add performance tracing to the `Echo` integration ([#722](https://github.com/getsentry/sentry-go/pull/722))
|
||||
- Add performance tracing to the `FastHTTP` integration ([#723](https://github.com/getsentry/sentry-go/pull/723))
|
||||
- Add performance tracing to the `Iris` integration ([#809](https://github.com/getsentry/sentry-go/pull/809))
|
||||
- Add performance tracing to the `Negroni` integration ([#808](https://github.com/getsentry/sentry-go/pull/808))
|
||||
- Add `FailureIssueThreshold` & `RecoveryThreshold` to `MonitorConfig` ([#775](https://github.com/getsentry/sentry-go/pull/775))
|
||||
- Use `errors.Unwrap()` to create exception groups ([#792](https://github.com/getsentry/sentry-go/pull/792))
|
||||
- Add support for matching on strings for `ClientOptions.IgnoreErrors` & `ClientOptions.IgnoreTransactions` ([#819](https://github.com/getsentry/sentry-go/pull/819))
|
||||
- Add `http.request.method` attribute for performance span data ([#786](https://github.com/getsentry/sentry-go/pull/786))
|
||||
- Accept `interface{}` for span data values ([#784](https://github.com/getsentry/sentry-go/pull/784))
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Fix missing stack trace for parsing error in `logrusentry` ([#689](https://github.com/getsentry/sentry-go/pull/689))
|
||||
|
||||
## 0.27.0
|
||||
|
||||
The Sentry SDK team is happy to announce the immediate availability of Sentry Go SDK v0.27.0.
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/README.md | 2 (generated, vendored)
@@ -13,7 +13,6 @@
|
||||
[](https://github.com/getsentry/sentry-go/actions?query=workflow%3Ago-workflow)
|
||||
[](https://goreportcard.com/report/github.com/getsentry/sentry-go)
|
||||
[](https://discord.gg/Ww9hbqr)
|
||||
[](https://godoc.org/github.com/getsentry/sentry-go)
|
||||
[](https://pkg.go.dev/github.com/getsentry/sentry-go)
|
||||
|
||||
`sentry-go` provides a Sentry client implementation for the Go programming
|
||||
@@ -85,7 +84,6 @@ checkout the official documentation:
|
||||
|
||||
- [Bug Tracker](https://github.com/getsentry/sentry-go/issues)
|
||||
- [GitHub Project](https://github.com/getsentry/sentry-go)
|
||||
- [](https://godoc.org/github.com/getsentry/sentry-go)
|
||||
- [](https://pkg.go.dev/github.com/getsentry/sentry-go)
|
||||
- [](https://docs.sentry.io/platforms/go/)
|
||||
- [](https://github.com/getsentry/sentry-go/discussions)
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/check_in.go | 4 (generated, vendored)
@@ -87,6 +87,10 @@ type MonitorConfig struct { //nolint: maligned // prefer readability over optima
|
||||
// A tz database string representing the timezone which the monitor's execution schedule is in.
|
||||
// See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
|
||||
Timezone string `json:"timezone,omitempty"`
|
||||
// The number of consecutive failed check-ins it takes before an issue is created.
|
||||
FailureIssueThreshold int64 `json:"failure_issue_threshold,omitempty"`
|
||||
// The number of consecutive OK check-ins it takes before an issue is resolved.
|
||||
RecoveryThreshold int64 `json:"recovery_threshold,omitempty"`
|
||||
}
|
||||
|
||||
type CheckIn struct { //nolint: maligned // prefer readability over optimal memory layout
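The two new MonitorConfig fields are optional knobs on cron check-in reporting. A hedged sketch of passing them through the existing check-in API, assuming sentry.Init has already run; the slug and schedule are illustrative:

sentry.CaptureCheckIn(
	&sentry.CheckIn{
		MonitorSlug: "nightly-backup", // illustrative slug
		Status:      sentry.CheckInStatusInProgress,
	},
	&sentry.MonitorConfig{
		Schedule:              sentry.CrontabSchedule("0 3 * * *"),
		CheckInMargin:         5,  // minutes
		MaxRuntime:            30, // minutes
		Timezone:              "UTC",
		FailureIssueThreshold: 3, // new: consecutive failures before an issue is created
		RecoveryThreshold:     2, // new: consecutive OK check-ins before it resolves
	},
)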
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/dsn.go | 3 (generated, vendored)
@@ -90,11 +90,10 @@ func NewDsn(rawURL string) (*Dsn, error) {
 	// Port
 	var port int
 	if parsedURL.Port() != "" {
-		parsedPort, err := strconv.Atoi(parsedURL.Port())
+		port, err = strconv.Atoi(parsedURL.Port())
 		if err != nil {
 			return nil, &DsnParseError{"invalid port"}
 		}
-		port = parsedPort
 	} else {
 		port = scheme.defaultPort()
 	}
vendor/github.com/getsentry/sentry-go/integrations.go | 4 (generated, vendored)
@@ -140,7 +140,7 @@ func (iei *ignoreErrorsIntegration) processor(event *Event, _ *EventHint) *Event
|
||||
|
||||
for _, suspect := range suspects {
|
||||
for _, pattern := range iei.ignoreErrors {
|
||||
if pattern.Match([]byte(suspect)) {
|
||||
if pattern.Match([]byte(suspect)) || strings.Contains(suspect, pattern.String()) {
|
||||
Logger.Printf("Event dropped due to being matched by `IgnoreErrors` option."+
|
||||
"| Value matched: %s | Filter used: %s", suspect, pattern)
|
||||
return nil
|
||||
@@ -202,7 +202,7 @@ func (iei *ignoreTransactionsIntegration) processor(event *Event, _ *EventHint)
|
||||
}
|
||||
|
||||
for _, pattern := range iei.ignoreTransactions {
|
||||
if pattern.Match([]byte(suspect)) {
|
||||
if pattern.Match([]byte(suspect)) || strings.Contains(suspect, pattern.String()) {
|
||||
Logger.Printf("Transaction dropped due to being matched by `IgnoreTransactions` option."+
|
||||
"| Value matched: %s | Filter used: %s", suspect, pattern)
|
||||
return nil
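With the change above, each IgnoreErrors / IgnoreTransactions entry is matched both as a regular expression and as a plain substring of the value. A hedged configuration sketch, with the usual imports assumed and the DSN and patterns purely illustrative:

err := sentry.Init(sentry.ClientOptions{
	Dsn: os.Getenv("SENTRY_DSN"),
	// "context canceled" now also drops errors that merely contain that text,
	// even though it is not written as a regular expression.
	IgnoreErrors:       []string{"context canceled", `timeout after \d+ms`},
	IgnoreTransactions: []string{"GET /healthz"},
})
if err != nil {
	log.Fatalf("sentry.Init: %v", err)
}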
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/interfaces.go | 92 (generated, vendored)
@@ -3,6 +3,7 @@ package sentry
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -24,6 +25,9 @@ const profileType = "profile"
|
||||
// checkInType is the type of a check in event.
|
||||
const checkInType = "check_in"
|
||||
|
||||
// metricType is the type of a metric event.
|
||||
const metricType = "statsd"
|
||||
|
||||
// Level marks the severity of the event.
|
||||
type Level string
|
||||
|
||||
@@ -36,15 +40,6 @@ const (
|
||||
LevelFatal Level = "fatal"
|
||||
)
|
||||
|
||||
func getSensitiveHeaders() map[string]bool {
|
||||
return map[string]bool{
|
||||
"Authorization": true,
|
||||
"Cookie": true,
|
||||
"X-Forwarded-For": true,
|
||||
"X-Real-Ip": true,
|
||||
}
|
||||
}
|
||||
|
||||
// SdkInfo contains all metadata about the SDK being used.
|
||||
type SdkInfo struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
@@ -170,6 +165,13 @@ type Request struct {
|
||||
Env map[string]string `json:"env,omitempty"`
|
||||
}
|
||||
|
||||
var sensitiveHeaders = map[string]struct{}{
|
||||
"Authorization": {},
|
||||
"Cookie": {},
|
||||
"X-Forwarded-For": {},
|
||||
"X-Real-Ip": {},
|
||||
}
|
||||
|
||||
// NewRequest returns a new Sentry Request from the given http.Request.
|
||||
//
|
||||
// NewRequest avoids operations that depend on network access. In particular, it
|
||||
@@ -200,7 +202,6 @@ func NewRequest(r *http.Request) *Request {
|
||||
env = map[string]string{"REMOTE_ADDR": addr, "REMOTE_PORT": port}
|
||||
}
|
||||
} else {
|
||||
sensitiveHeaders := getSensitiveHeaders()
|
||||
for k, v := range r.Header {
|
||||
if _, ok := sensitiveHeaders[k]; !ok {
|
||||
headers[k] = strings.Join(v, ",")
|
||||
@@ -222,11 +223,15 @@ func NewRequest(r *http.Request) *Request {
|
||||
|
||||
// Mechanism is the mechanism by which an exception was generated and handled.
|
||||
type Mechanism struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
HelpLink string `json:"help_link,omitempty"`
|
||||
Handled *bool `json:"handled,omitempty"`
|
||||
Data map[string]interface{} `json:"data,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
HelpLink string `json:"help_link,omitempty"`
|
||||
Source string `json:"source,omitempty"`
|
||||
Handled *bool `json:"handled,omitempty"`
|
||||
ParentID *int `json:"parent_id,omitempty"`
|
||||
ExceptionID int `json:"exception_id"`
|
||||
IsExceptionGroup bool `json:"is_exception_group,omitempty"`
|
||||
Data map[string]any `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// SetUnhandled indicates that the exception is an unhandled exception, i.e.
|
||||
@@ -318,6 +323,7 @@ type Event struct {
|
||||
Exception []Exception `json:"exception,omitempty"`
|
||||
DebugMeta *DebugMeta `json:"debug_meta,omitempty"`
|
||||
Attachments []*Attachment `json:"-"`
|
||||
Metrics []Metric `json:"-"`
|
||||
|
||||
// The fields below are only relevant for transactions.
|
||||
|
||||
@@ -339,27 +345,43 @@ type Event struct {
|
||||
// SetException appends the unwrapped errors to the event's exception list.
|
||||
//
|
||||
// maxErrorDepth is the maximum depth of the error chain we will look
|
||||
// into while unwrapping the errors.
|
||||
// into while unwrapping the errors. If maxErrorDepth is -1, we will
|
||||
// unwrap all errors in the chain.
|
||||
func (e *Event) SetException(exception error, maxErrorDepth int) {
|
||||
err := exception
|
||||
if err == nil {
|
||||
if exception == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i := 0; i < maxErrorDepth && err != nil; i++ {
|
||||
err := exception
|
||||
|
||||
for i := 0; err != nil && (i < maxErrorDepth || maxErrorDepth == -1); i++ {
|
||||
// Add the current error to the exception slice with its details
|
||||
e.Exception = append(e.Exception, Exception{
|
||||
Value: err.Error(),
|
||||
Type: reflect.TypeOf(err).String(),
|
||||
Stacktrace: ExtractStacktrace(err),
|
||||
})
|
||||
switch previous := err.(type) {
|
||||
case interface{ Unwrap() error }:
|
||||
err = previous.Unwrap()
|
||||
case interface{ Cause() error }:
|
||||
err = previous.Cause()
|
||||
default:
|
||||
err = nil
|
||||
|
||||
// Attempt to unwrap the error using the standard library's Unwrap method.
|
||||
// If errors.Unwrap returns nil, it means either there is no error to unwrap,
|
||||
// or the error does not implement the Unwrap method.
|
||||
unwrappedErr := errors.Unwrap(err)
|
||||
|
||||
if unwrappedErr != nil {
|
||||
// The error was successfully unwrapped using the standard library's Unwrap method.
|
||||
err = unwrappedErr
|
||||
continue
|
||||
}
|
||||
|
||||
cause, ok := err.(interface{ Cause() error })
|
||||
if !ok {
|
||||
// We cannot unwrap the error further.
|
||||
break
|
||||
}
|
||||
|
||||
// The error implements the Cause method, indicating it may have been wrapped
|
||||
// using the github.com/pkg/errors package.
|
||||
err = cause.Cause()
|
||||
}
|
||||
|
||||
// Add a trace of the current stack to the most recent error in a chain if
|
||||
@@ -370,8 +392,23 @@ func (e *Event) SetException(exception error, maxErrorDepth int) {
|
||||
e.Exception[0].Stacktrace = NewStacktrace()
|
||||
}
|
||||
|
||||
if len(e.Exception) <= 1 {
|
||||
return
|
||||
}
|
||||
|
||||
// event.Exception should be sorted such that the most recent error is last.
|
||||
reverse(e.Exception)
|
||||
|
||||
for i := range e.Exception {
|
||||
e.Exception[i].Mechanism = &Mechanism{
|
||||
IsExceptionGroup: true,
|
||||
ExceptionID: i,
|
||||
}
|
||||
if i == 0 {
|
||||
continue
|
||||
}
|
||||
e.Exception[i].Mechanism.ParentID = Pointer(i - 1)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Event.Contexts map[string]interface{} => map[string]EventContext,
|
||||
@@ -492,13 +529,12 @@ func (e *Event) checkInMarshalJSON() ([]byte, error) {
|
||||
|
||||
// NewEvent creates a new Event.
|
||||
func NewEvent() *Event {
|
||||
event := Event{
|
||||
return &Event{
|
||||
Contexts: make(map[string]Context),
|
||||
Extra: make(map[string]interface{}),
|
||||
Tags: make(map[string]string),
|
||||
Modules: make(map[string]string),
|
||||
}
|
||||
return &event
|
||||
}
|
||||
|
||||
// Thread specifies threads that were running at the time of an event.
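The reworked SetException is what turns a wrapped error chain into an exception group. A hedged sketch of the caller side, assuming the client is already initialized; the error messages are illustrative:

base := errors.New("connection refused")
wrapped := fmt.Errorf("load profile: %w", fmt.Errorf("query users: %w", base))

// CaptureException unwraps via errors.Unwrap (and Cause() for pkg/errors-style
// wrappers), reports the chain with the most recent error last, and links the
// entries through the Mechanism ExceptionID / ParentID fields set above.
sentry.CaptureException(wrapped)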
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/internal/ratelimit/category.go | 17 (generated, vendored)
@@ -32,15 +32,14 @@ var knownCategories = map[Category]struct{}{
|
||||
|
||||
// String returns the category formatted for debugging.
|
||||
func (c Category) String() string {
|
||||
switch c {
|
||||
case "":
|
||||
if c == "" {
|
||||
return "CategoryAll"
|
||||
default:
|
||||
caser := cases.Title(language.English)
|
||||
rv := "Category"
|
||||
for _, w := range strings.Fields(string(c)) {
|
||||
rv += caser.String(w)
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
caser := cases.Title(language.English)
|
||||
rv := "Category"
|
||||
for _, w := range strings.Fields(string(c)) {
|
||||
rv += caser.String(w)
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/metrics.go | 427 (generated, vendored, new file)
@@ -0,0 +1,427 @@
|
||||
package sentry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"math"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type (
|
||||
NumberOrString interface {
|
||||
int | string
|
||||
}
|
||||
|
||||
void struct{}
|
||||
)
|
||||
|
||||
var (
|
||||
member void
|
||||
keyRegex = regexp.MustCompile(`[^a-zA-Z0-9_/.-]+`)
|
||||
valueRegex = regexp.MustCompile(`[^\w\d\s_:/@\.{}\[\]$-]+`)
|
||||
unitRegex = regexp.MustCompile(`[^a-z]+`)
|
||||
)
|
||||
|
||||
type MetricUnit struct {
|
||||
unit string
|
||||
}
|
||||
|
||||
func (m MetricUnit) toString() string {
|
||||
return m.unit
|
||||
}
|
||||
|
||||
func NanoSecond() MetricUnit {
|
||||
return MetricUnit{
|
||||
"nanosecond",
|
||||
}
|
||||
}
|
||||
|
||||
func MicroSecond() MetricUnit {
|
||||
return MetricUnit{
|
||||
"microsecond",
|
||||
}
|
||||
}
|
||||
|
||||
func MilliSecond() MetricUnit {
|
||||
return MetricUnit{
|
||||
"millisecond",
|
||||
}
|
||||
}
|
||||
|
||||
func Second() MetricUnit {
|
||||
return MetricUnit{
|
||||
"second",
|
||||
}
|
||||
}
|
||||
|
||||
func Minute() MetricUnit {
|
||||
return MetricUnit{
|
||||
"minute",
|
||||
}
|
||||
}
|
||||
|
||||
func Hour() MetricUnit {
|
||||
return MetricUnit{
|
||||
"hour",
|
||||
}
|
||||
}
|
||||
|
||||
func Day() MetricUnit {
|
||||
return MetricUnit{
|
||||
"day",
|
||||
}
|
||||
}
|
||||
|
||||
func Week() MetricUnit {
|
||||
return MetricUnit{
|
||||
"week",
|
||||
}
|
||||
}
|
||||
|
||||
func Bit() MetricUnit {
|
||||
return MetricUnit{
|
||||
"bit",
|
||||
}
|
||||
}
|
||||
|
||||
func Byte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"byte",
|
||||
}
|
||||
}
|
||||
|
||||
func KiloByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"kilobyte",
|
||||
}
|
||||
}
|
||||
|
||||
func KibiByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"kibibyte",
|
||||
}
|
||||
}
|
||||
|
||||
func MegaByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"megabyte",
|
||||
}
|
||||
}
|
||||
|
||||
func MebiByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"mebibyte",
|
||||
}
|
||||
}
|
||||
|
||||
func GigaByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"gigabyte",
|
||||
}
|
||||
}
|
||||
|
||||
func GibiByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"gibibyte",
|
||||
}
|
||||
}
|
||||
|
||||
func TeraByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"terabyte",
|
||||
}
|
||||
}
|
||||
|
||||
func TebiByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"tebibyte",
|
||||
}
|
||||
}
|
||||
|
||||
func PetaByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"petabyte",
|
||||
}
|
||||
}
|
||||
|
||||
func PebiByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"pebibyte",
|
||||
}
|
||||
}
|
||||
|
||||
func ExaByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"exabyte",
|
||||
}
|
||||
}
|
||||
|
||||
func ExbiByte() MetricUnit {
|
||||
return MetricUnit{
|
||||
"exbibyte",
|
||||
}
|
||||
}
|
||||
|
||||
func Ratio() MetricUnit {
|
||||
return MetricUnit{
|
||||
"ratio",
|
||||
}
|
||||
}
|
||||
|
||||
func Percent() MetricUnit {
|
||||
return MetricUnit{
|
||||
"percent",
|
||||
}
|
||||
}
|
||||
|
||||
func CustomUnit(unit string) MetricUnit {
|
||||
return MetricUnit{
|
||||
unitRegex.ReplaceAllString(unit, ""),
|
||||
}
|
||||
}
|
||||
|
||||
type Metric interface {
|
||||
GetType() string
|
||||
GetTags() map[string]string
|
||||
GetKey() string
|
||||
GetUnit() string
|
||||
GetTimestamp() int64
|
||||
SerializeValue() string
|
||||
SerializeTags() string
|
||||
}
|
||||
|
||||
type abstractMetric struct {
|
||||
key string
|
||||
unit MetricUnit
|
||||
tags map[string]string
|
||||
// A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
|
||||
timestamp int64
|
||||
}
|
||||
|
||||
func (am abstractMetric) GetTags() map[string]string {
|
||||
return am.tags
|
||||
}
|
||||
|
||||
func (am abstractMetric) GetKey() string {
|
||||
return am.key
|
||||
}
|
||||
|
||||
func (am abstractMetric) GetUnit() string {
|
||||
return am.unit.toString()
|
||||
}
|
||||
|
||||
func (am abstractMetric) GetTimestamp() int64 {
|
||||
return am.timestamp
|
||||
}
|
||||
|
||||
func (am abstractMetric) SerializeTags() string {
|
||||
var sb strings.Builder
|
||||
|
||||
values := make([]string, 0, len(am.tags))
|
||||
for k := range am.tags {
|
||||
values = append(values, k)
|
||||
}
|
||||
sortSlice(values)
|
||||
|
||||
for _, key := range values {
|
||||
val := sanitizeValue(am.tags[key])
|
||||
key = sanitizeKey(key)
|
||||
sb.WriteString(fmt.Sprintf("%s:%s,", key, val))
|
||||
}
|
||||
s := sb.String()
|
||||
if len(s) > 0 {
|
||||
s = s[:len(s)-1]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Counter Metric.
|
||||
type CounterMetric struct {
|
||||
value float64
|
||||
abstractMetric
|
||||
}
|
||||
|
||||
func (c *CounterMetric) Add(value float64) {
|
||||
c.value += value
|
||||
}
|
||||
|
||||
func (c CounterMetric) GetType() string {
|
||||
return "c"
|
||||
}
|
||||
|
||||
func (c CounterMetric) SerializeValue() string {
|
||||
return fmt.Sprintf(":%v", c.value)
|
||||
}
|
||||
|
||||
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
|
||||
func NewCounterMetric(key string, unit MetricUnit, tags map[string]string, timestamp int64, value float64) CounterMetric {
|
||||
am := abstractMetric{
|
||||
key,
|
||||
unit,
|
||||
tags,
|
||||
timestamp,
|
||||
}
|
||||
|
||||
return CounterMetric{
|
||||
value,
|
||||
am,
|
||||
}
|
||||
}
|
||||
|
||||
// Distribution Metric.
|
||||
type DistributionMetric struct {
|
||||
values []float64
|
||||
abstractMetric
|
||||
}
|
||||
|
||||
func (d *DistributionMetric) Add(value float64) {
|
||||
d.values = append(d.values, value)
|
||||
}
|
||||
|
||||
func (d DistributionMetric) GetType() string {
|
||||
return "d"
|
||||
}
|
||||
|
||||
func (d DistributionMetric) SerializeValue() string {
|
||||
var sb strings.Builder
|
||||
for _, el := range d.values {
|
||||
sb.WriteString(fmt.Sprintf(":%v", el))
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
|
||||
func NewDistributionMetric(key string, unit MetricUnit, tags map[string]string, timestamp int64, value float64) DistributionMetric {
|
||||
am := abstractMetric{
|
||||
key,
|
||||
unit,
|
||||
tags,
|
||||
timestamp,
|
||||
}
|
||||
|
||||
return DistributionMetric{
|
||||
[]float64{value},
|
||||
am,
|
||||
}
|
||||
}
|
||||
|
||||
// Gauge Metric.
|
||||
type GaugeMetric struct {
|
||||
last float64
|
||||
min float64
|
||||
max float64
|
||||
sum float64
|
||||
count float64
|
||||
abstractMetric
|
||||
}
|
||||
|
||||
func (g *GaugeMetric) Add(value float64) {
|
||||
g.last = value
|
||||
g.min = math.Min(g.min, value)
|
||||
g.max = math.Max(g.max, value)
|
||||
g.sum += value
|
||||
g.count++
|
||||
}
|
||||
|
||||
func (g GaugeMetric) GetType() string {
|
||||
return "g"
|
||||
}
|
||||
|
||||
func (g GaugeMetric) SerializeValue() string {
|
||||
return fmt.Sprintf(":%v:%v:%v:%v:%v", g.last, g.min, g.max, g.sum, g.count)
|
||||
}
|
||||
|
||||
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
|
||||
func NewGaugeMetric(key string, unit MetricUnit, tags map[string]string, timestamp int64, value float64) GaugeMetric {
|
||||
am := abstractMetric{
|
||||
key,
|
||||
unit,
|
||||
tags,
|
||||
timestamp,
|
||||
}
|
||||
|
||||
return GaugeMetric{
|
||||
value, // last
|
||||
value, // min
|
||||
value, // max
|
||||
value, // sum
|
||||
value, // count
|
||||
am,
|
||||
}
|
||||
}
|
||||
|
||||
// Set Metric.
|
||||
type SetMetric[T NumberOrString] struct {
|
||||
values map[T]void
|
||||
abstractMetric
|
||||
}
|
||||
|
||||
func (s *SetMetric[T]) Add(value T) {
|
||||
s.values[value] = member
|
||||
}
|
||||
|
||||
func (s SetMetric[T]) GetType() string {
|
||||
return "s"
|
||||
}
|
||||
|
||||
func (s SetMetric[T]) SerializeValue() string {
|
||||
_hash := func(s string) uint32 {
|
||||
return crc32.ChecksumIEEE([]byte(s))
|
||||
}
|
||||
|
||||
values := make([]T, 0, len(s.values))
|
||||
for k := range s.values {
|
||||
values = append(values, k)
|
||||
}
|
||||
sortSlice(values)
|
||||
|
||||
var sb strings.Builder
|
||||
for _, el := range values {
|
||||
switch any(el).(type) {
|
||||
case int:
|
||||
sb.WriteString(fmt.Sprintf(":%v", el))
|
||||
case string:
|
||||
s := fmt.Sprintf("%v", el)
|
||||
sb.WriteString(fmt.Sprintf(":%d", _hash(s)))
|
||||
}
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// timestamp: A unix timestamp (full seconds elapsed since 1970-01-01 00:00 UTC).
|
||||
func NewSetMetric[T NumberOrString](key string, unit MetricUnit, tags map[string]string, timestamp int64, value T) SetMetric[T] {
|
||||
am := abstractMetric{
|
||||
key,
|
||||
unit,
|
||||
tags,
|
||||
timestamp,
|
||||
}
|
||||
|
||||
return SetMetric[T]{
|
||||
map[T]void{
|
||||
value: member,
|
||||
},
|
||||
am,
|
||||
}
|
||||
}
|
||||
|
||||
func sanitizeKey(s string) string {
|
||||
return keyRegex.ReplaceAllString(s, "_")
|
||||
}
|
||||
|
||||
func sanitizeValue(s string) string {
|
||||
return valueRegex.ReplaceAllString(s, "")
|
||||
}
|
||||
|
||||
type Ordered interface {
|
||||
~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 | ~string
|
||||
}
|
||||
|
||||
func sortSlice[T Ordered](s []T) {
|
||||
sort.Slice(s, func(i, j int) bool {
|
||||
return s[i] < s[j]
|
||||
})
|
||||
}
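The new metrics.go only defines the value types; how they leave the process is shown by the transport changes further down (envelope items of type "statsd"). A hedged sketch of constructing one of these metrics and attaching it to an event; whether the SDK also offers a higher-level aggregator is not part of this diff, and the key, tags, and value are illustrative:

m := sentry.NewDistributionMetric(
	"job.duration",                        // key
	sentry.MilliSecond(),                  // unit
	map[string]string{"queue": "default"}, // tags
	time.Now().Unix(),                     // unix timestamp, full seconds
	187.5,                                 // value
)

event := sentry.NewEvent()
event.Type = "statsd" // matches the metricType constant used by the transport
event.Metrics = []sentry.Metric{m}
sentry.CaptureEvent(event)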
|
||||
vendor/github.com/getsentry/sentry-go/sentry.go | 23 (generated, vendored)
@@ -6,7 +6,7 @@ import (
|
||||
)
|
||||
|
||||
// The version of the SDK.
|
||||
const SDKVersion = "0.27.0"
|
||||
const SDKVersion = "0.28.1"
|
||||
|
||||
// apiVersion is the minimum version of the Sentry API compatible with the
|
||||
// sentry-go SDK.
|
||||
@@ -72,18 +72,17 @@ func Recover() *EventID {
|
||||
|
||||
// RecoverWithContext captures a panic and passes relevant context object.
|
||||
func RecoverWithContext(ctx context.Context) *EventID {
|
||||
if err := recover(); err != nil {
|
||||
var hub *Hub
|
||||
|
||||
if HasHubOnContext(ctx) {
|
||||
hub = GetHubFromContext(ctx)
|
||||
} else {
|
||||
hub = CurrentHub()
|
||||
}
|
||||
|
||||
return hub.RecoverWithContext(ctx, err)
|
||||
err := recover()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
||||
hub := GetHubFromContext(ctx)
|
||||
if hub == nil {
|
||||
hub = CurrentHub()
|
||||
}
|
||||
|
||||
return hub.RecoverWithContext(ctx, err)
|
||||
}
|
||||
|
||||
// WithScope is a shorthand for CurrentHub().WithScope.
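The rewritten RecoverWithContext behaves the same from the caller's point of view; it only resolves the hub differently. A hedged usage sketch, where doWork is a hypothetical function that may panic:

func handle(ctx context.Context) {
	// Must be deferred directly so the recover() inside it sees the panic.
	// A hub stored on ctx is preferred; otherwise CurrentHub() is used.
	defer sentry.RecoverWithContext(ctx)

	doWork(ctx) // hypothetical worker that may panic
}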
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/traces_profiler.go | 35 (generated, vendored)
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
// Checks whether the transaction should be profiled (according to ProfilesSampleRate)
|
||||
// and starts a profiler if so.
|
||||
func (span *Span) sampleTransactionProfile() {
|
||||
var sampleRate = span.clientOptions().ProfilesSampleRate
|
||||
func (s *Span) sampleTransactionProfile() {
|
||||
var sampleRate = s.clientOptions().ProfilesSampleRate
|
||||
switch {
|
||||
case sampleRate < 0.0 || sampleRate > 1.0:
|
||||
Logger.Printf("Skipping transaction profiling: ProfilesSampleRate out of range [0.0, 1.0]: %f\n", sampleRate)
|
||||
@@ -19,7 +19,7 @@ func (span *Span) sampleTransactionProfile() {
|
||||
if globalProfiler == nil {
|
||||
Logger.Println("Skipping transaction profiling: the profiler couldn't be started")
|
||||
} else {
|
||||
span.collectProfile = collectTransactionProfile
|
||||
s.collectProfile = collectTransactionProfile
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -69,22 +69,27 @@ func (info *profileInfo) UpdateFromEvent(event *Event) {
|
||||
info.Dist = event.Dist
|
||||
info.Transaction.ID = event.EventID
|
||||
|
||||
getStringFromContext := func(context map[string]interface{}, originalValue, key string) string {
|
||||
v, ok := context[key]
|
||||
if !ok {
|
||||
return originalValue
|
||||
}
|
||||
|
||||
if s, ok := v.(string); ok {
|
||||
return s
|
||||
}
|
||||
|
||||
return originalValue
|
||||
}
|
||||
|
||||
if runtimeContext, ok := event.Contexts["runtime"]; ok {
|
||||
if value, ok := runtimeContext["name"]; !ok {
|
||||
info.Runtime.Name = value.(string)
|
||||
}
|
||||
if value, ok := runtimeContext["version"]; !ok {
|
||||
info.Runtime.Version = value.(string)
|
||||
}
|
||||
info.Runtime.Name = getStringFromContext(runtimeContext, info.Runtime.Name, "name")
|
||||
info.Runtime.Version = getStringFromContext(runtimeContext, info.Runtime.Version, "version")
|
||||
}
|
||||
if osContext, ok := event.Contexts["os"]; ok {
|
||||
if value, ok := osContext["name"]; !ok {
|
||||
info.OS.Name = value.(string)
|
||||
}
|
||||
info.OS.Name = getStringFromContext(osContext, info.OS.Name, "name")
|
||||
}
|
||||
if deviceContext, ok := event.Contexts["device"]; ok {
|
||||
if value, ok := deviceContext["arch"]; !ok {
|
||||
info.Device.Architecture = value.(string)
|
||||
}
|
||||
info.Device.Architecture = getStringFromContext(deviceContext, info.Device.Architecture, "arch")
|
||||
}
|
||||
}
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/tracing.go | 10 (generated, vendored)
@@ -169,11 +169,11 @@ func StartSpan(ctx context.Context, operation string, options ...SpanOption) *Sp
|
||||
|
||||
span.Sampled = span.sample()
|
||||
|
||||
span.recorder = &spanRecorder{}
|
||||
if hasParent {
|
||||
span.recorder = parent.spanRecorder()
|
||||
} else {
|
||||
span.recorder = &spanRecorder{}
|
||||
}
|
||||
|
||||
span.recorder.record(&span)
|
||||
|
||||
hub := hubFromContext(ctx)
|
||||
@@ -226,7 +226,11 @@ func (s *Span) SetTag(name, value string) {
|
||||
// SetData sets a data on the span. It is recommended to use SetData instead of
|
||||
// accessing the data map directly as SetData takes care of initializing the map
|
||||
// when necessary.
|
||||
func (s *Span) SetData(name, value string) {
|
||||
func (s *Span) SetData(name string, value interface{}) {
|
||||
if value == nil {
|
||||
return
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
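SetData widening its value parameter from string to interface{} means span data can now carry numbers and booleans directly. A hedged sketch, assuming a ctx with an active hub; the operation and data keys are illustrative:

span := sentry.StartSpan(ctx, "db.query")
defer span.Finish()

// Previously only strings were accepted; nil values are still ignored.
span.SetData("db.rows_returned", 42)
span.SetData("db.cached", false)
span.SetData("db.statement", "SELECT id FROM users") // illustrative query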
|
||||
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/transport.go | 108 (generated, vendored)
@@ -2,6 +2,7 @@ package sentry
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
@@ -94,6 +95,55 @@ func getRequestBodyFromEvent(event *Event) []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func marshalMetrics(metrics []Metric) []byte {
|
||||
var b bytes.Buffer
|
||||
for i, metric := range metrics {
|
||||
b.WriteString(metric.GetKey())
|
||||
if unit := metric.GetUnit(); unit != "" {
|
||||
b.WriteString(fmt.Sprintf("@%s", unit))
|
||||
}
|
||||
b.WriteString(fmt.Sprintf("%s|%s", metric.SerializeValue(), metric.GetType()))
|
||||
if serializedTags := metric.SerializeTags(); serializedTags != "" {
|
||||
b.WriteString(fmt.Sprintf("|#%s", serializedTags))
|
||||
}
|
||||
b.WriteString(fmt.Sprintf("|T%d", metric.GetTimestamp()))
|
||||
|
||||
if i < len(metrics)-1 {
|
||||
b.WriteString("\n")
|
||||
}
|
||||
}
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
func encodeMetric(enc *json.Encoder, b io.Writer, metrics []Metric) error {
|
||||
body := marshalMetrics(metrics)
|
||||
// Item header
|
||||
err := enc.Encode(struct {
|
||||
Type string `json:"type"`
|
||||
Length int `json:"length"`
|
||||
}{
|
||||
Type: metricType,
|
||||
Length: len(body),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// metric payload
|
||||
if _, err = b.Write(body); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// "Envelopes should be terminated with a trailing newline."
|
||||
//
|
||||
// [1]: https://develop.sentry.dev/sdk/envelopes/#envelopes
|
||||
if _, err := b.Write([]byte("\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func encodeAttachment(enc *json.Encoder, b io.Writer, attachment *Attachment) error {
|
||||
// Attachment header
|
||||
err := enc.Encode(struct {
|
||||
@@ -175,11 +225,15 @@ func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMes
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if event.Type == transactionType || event.Type == checkInType {
|
||||
switch event.Type {
|
||||
case transactionType, checkInType:
|
||||
err = encodeEnvelopeItem(enc, event.Type, body)
|
||||
} else {
|
||||
case metricType:
|
||||
err = encodeMetric(enc, &b, event.Metrics)
|
||||
default:
|
||||
err = encodeEnvelopeItem(enc, eventType, body)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -206,7 +260,7 @@ func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMes
|
||||
return &b, nil
|
||||
}
|
||||
|
||||
func getRequestFromEvent(event *Event, dsn *Dsn) (r *http.Request, err error) {
|
||||
func getRequestFromEvent(ctx context.Context, event *Event, dsn *Dsn) (r *http.Request, err error) {
|
||||
defer func() {
|
||||
if r != nil {
|
||||
r.Header.Set("User-Agent", fmt.Sprintf("%s/%s", event.Sdk.Name, event.Sdk.Version))
|
||||
@@ -233,7 +287,13 @@ func getRequestFromEvent(event *Event, dsn *Dsn) (r *http.Request, err error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return http.NewRequest(
|
||||
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
return http.NewRequestWithContext(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
dsn.GetAPIURL().String(),
|
||||
envelope,
|
||||
@@ -344,8 +404,13 @@ func (t *HTTPTransport) Configure(options ClientOptions) {
|
||||
})
|
||||
}
|
||||
|
||||
// SendEvent assembles a new packet out of Event and sends it to remote server.
|
||||
// SendEvent assembles a new packet out of Event and sends it to the remote server.
|
||||
func (t *HTTPTransport) SendEvent(event *Event) {
|
||||
t.SendEventWithContext(context.Background(), event)
|
||||
}
|
||||
|
||||
// SendEventWithContext assembles a new packet out of Event and sends it to the remote server.
|
||||
func (t *HTTPTransport) SendEventWithContext(ctx context.Context, event *Event) {
|
||||
if t.dsn == nil {
|
||||
return
|
||||
}
|
||||
@@ -356,7 +421,7 @@ func (t *HTTPTransport) SendEvent(event *Event) {
|
||||
return
|
||||
}
|
||||
|
||||
request, err := getRequestFromEvent(event, t.dsn)
|
||||
request, err := getRequestFromEvent(ctx, event, t.dsn)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -478,6 +543,13 @@ func (t *HTTPTransport) worker() {
|
||||
Logger.Printf("There was an issue with sending an event: %v", err)
|
||||
continue
|
||||
}
|
||||
if response.StatusCode >= 400 && response.StatusCode <= 599 {
|
||||
b, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
Logger.Printf("Error while reading response code: %v", err)
|
||||
}
|
||||
Logger.Printf("Sending %s failed with the following error: %s", eventType, string(b))
|
||||
}
|
||||
t.mu.Lock()
|
||||
t.limits.Merge(ratelimit.FromResponse(response))
|
||||
t.mu.Unlock()
|
||||
@@ -567,8 +639,13 @@ func (t *HTTPSyncTransport) Configure(options ClientOptions) {
|
||||
}
|
||||
}
|
||||
|
||||
// SendEvent assembles a new packet out of Event and sends it to remote server.
|
||||
// SendEvent assembles a new packet out of Event and sends it to the remote server.
|
||||
func (t *HTTPSyncTransport) SendEvent(event *Event) {
|
||||
t.SendEventWithContext(context.Background(), event)
|
||||
}
|
||||
|
||||
// SendEventWithContext assembles a new packet out of Event and sends it to the remote server.
|
||||
func (t *HTTPSyncTransport) SendEventWithContext(ctx context.Context, event *Event) {
|
||||
if t.dsn == nil {
|
||||
return
|
||||
}
|
||||
@@ -577,15 +654,18 @@ func (t *HTTPSyncTransport) SendEvent(event *Event) {
|
||||
return
|
||||
}
|
||||
|
||||
request, err := getRequestFromEvent(event, t.dsn)
|
||||
request, err := getRequestFromEvent(ctx, event, t.dsn)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var eventType string
|
||||
if event.Type == transactionType {
|
||||
switch {
|
||||
case event.Type == transactionType:
|
||||
eventType = "transaction"
|
||||
} else {
|
||||
case event.Type == metricType:
|
||||
eventType = metricType
|
||||
default:
|
||||
eventType = fmt.Sprintf("%s event", event.Level)
|
||||
}
|
||||
Logger.Printf(
|
||||
@@ -601,6 +681,14 @@ func (t *HTTPSyncTransport) SendEvent(event *Event) {
|
||||
Logger.Printf("There was an issue with sending an event: %v", err)
|
||||
return
|
||||
}
|
||||
if response.StatusCode >= 400 && response.StatusCode <= 599 {
|
||||
b, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
Logger.Printf("Error while reading response code: %v", err)
|
||||
}
|
||||
Logger.Printf("Sending %s failed with the following error: %s", eventType, string(b))
|
||||
}
|
||||
|
||||
t.mu.Lock()
|
||||
t.limits.Merge(ratelimit.FromResponse(response))
|
||||
t.mu.Unlock()
|
||||
|
||||
vendor/github.com/getsentry/sentry-go/util.go | 4 (generated, vendored)
@@ -112,3 +112,7 @@ func revisionFromBuildInfo(info *debug.BuildInfo) string {
 
 	return ""
 }
+
+func Pointer[T any](v T) *T {
+	return &v
+}
vendor/github.com/rs/zerolog/README.md | 18 (generated, vendored)
@@ -60,7 +60,7 @@ func main() {
|
||||
// Output: {"time":1516134303,"level":"debug","message":"hello world"}
|
||||
```
|
||||
> Note: By default log writes to `os.Stderr`
|
||||
> Note: The default log level for `log.Print` is *debug*
|
||||
> Note: The default log level for `log.Print` is *trace*
|
||||
|
||||
### Contextual Logging
|
||||
|
||||
@@ -412,15 +412,7 @@ Equivalent of `Lshortfile`:
|
||||
|
||||
```go
|
||||
zerolog.CallerMarshalFunc = func(pc uintptr, file string, line int) string {
|
||||
short := file
|
||||
for i := len(file) - 1; i > 0; i-- {
|
||||
if file[i] == '/' {
|
||||
short = file[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
file = short
|
||||
return file + ":" + strconv.Itoa(line)
|
||||
return filepath.Base(file) + ":" + strconv.Itoa(line)
|
||||
}
|
||||
log.Logger = log.With().Caller().Logger()
|
||||
log.Info().Msg("hello world")
|
||||
@@ -646,10 +638,14 @@ Some settings can be changed and will be applied to all loggers:
|
||||
* `zerolog.LevelFieldName`: Can be set to customize level field name.
|
||||
* `zerolog.MessageFieldName`: Can be set to customize message field name.
|
||||
* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
|
||||
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp.
|
||||
* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formatted as UNIX timestamp.
|
||||
* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`).
|
||||
* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`).
|
||||
* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking.
|
||||
* `zerolog.FloatingPointPrecision`: If set to a value other than -1, controls the number
|
||||
of digits when formatting float numbers in JSON. See
|
||||
[strconv.FormatFloat](https://pkg.go.dev/strconv#FormatFloat)
|
||||
for more details.
|
||||
|
||||
## Field Types
|
||||
|
||||
|
||||
vendor/github.com/rs/zerolog/array.go | 6 (generated, vendored)
@@ -183,13 +183,13 @@ func (a *Array) Uint64(i uint64) *Array {
|
||||
|
||||
// Float32 appends f as a float32 to the array.
|
||||
func (a *Array) Float32(f float32) *Array {
|
||||
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
|
||||
a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
|
||||
return a
|
||||
}
|
||||
|
||||
// Float64 appends f as a float64 to the array.
|
||||
func (a *Array) Float64(f float64) *Array {
|
||||
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
|
||||
a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f, FloatingPointPrecision)
|
||||
return a
|
||||
}
|
||||
|
||||
@@ -201,7 +201,7 @@ func (a *Array) Time(t time.Time) *Array {
|
||||
|
||||
// Dur appends d to the array.
|
||||
func (a *Array) Dur(d time.Duration) *Array {
|
||||
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
|
||||
a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
|
||||
return a
|
||||
}
|
||||
|
||||
|
||||
vendor/github.com/rs/zerolog/console.go | 91 (generated, vendored)
@@ -28,6 +28,8 @@ const (
|
||||
|
||||
colorBold = 1
|
||||
colorDarkGray = 90
|
||||
|
||||
unknownLevel = "???"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -57,12 +59,21 @@ type ConsoleWriter struct {
|
||||
// TimeFormat specifies the format for timestamp in output.
|
||||
TimeFormat string
|
||||
|
||||
// TimeLocation tells ConsoleWriter’s default FormatTimestamp
|
||||
// how to localize the time.
|
||||
TimeLocation *time.Location
|
||||
|
||||
// PartsOrder defines the order of parts in output.
|
||||
PartsOrder []string
|
||||
|
||||
// PartsExclude defines parts to not display in output.
|
||||
PartsExclude []string
|
||||
|
||||
// FieldsOrder defines the order of contextual fields in output.
|
||||
FieldsOrder []string
|
||||
|
||||
fieldIsOrdered map[string]int
|
||||
|
||||
// FieldsExclude defines contextual fields to not display in output.
|
||||
FieldsExclude []string
|
||||
|
||||
@@ -83,9 +94,9 @@ type ConsoleWriter struct {
|
||||
// NewConsoleWriter creates and initializes a new ConsoleWriter.
|
||||
func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
|
||||
w := ConsoleWriter{
|
||||
Out: os.Stdout,
|
||||
TimeFormat: consoleDefaultTimeFormat,
|
||||
PartsOrder: consoleDefaultPartsOrder(),
|
||||
Out: os.Stdout,
|
||||
TimeFormat: consoleDefaultTimeFormat,
|
||||
PartsOrder: consoleDefaultPartsOrder(),
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
@@ -185,7 +196,12 @@ func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer
|
||||
}
|
||||
fields = append(fields, field)
|
||||
}
|
||||
sort.Strings(fields)
|
||||
|
||||
if len(w.FieldsOrder) > 0 {
|
||||
w.orderFields(fields)
|
||||
} else {
|
||||
sort.Strings(fields)
}

// Write space only if something has already been written to the buffer, and if there are fields.
if buf.Len() > 0 && len(fields) > 0 {
@@ -284,7 +300,7 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
}
case TimestampFieldName:
if w.FormatTimestamp == nil {
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor)
f = consoleDefaultFormatTimestamp(w.TimeFormat, w.TimeLocation, w.NoColor)
} else {
f = w.FormatTimestamp
}
@@ -318,6 +334,32 @@ func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{},
}
}

// orderFields takes an array of field names and an array representing field order
// and returns an array with any ordered fields at the beginning, in order,
// and the remaining fields after in their original order.
func (w ConsoleWriter) orderFields(fields []string) {
if w.fieldIsOrdered == nil {
w.fieldIsOrdered = make(map[string]int)
for i, fieldName := range w.FieldsOrder {
w.fieldIsOrdered[fieldName] = i
}
}
sort.Slice(fields, func(i, j int) bool {
ii, iOrdered := w.fieldIsOrdered[fields[i]]
jj, jOrdered := w.fieldIsOrdered[fields[j]]
if iOrdered && jOrdered {
return ii < jj
}
if iOrdered {
return true
}
if jOrdered {
return false
}
return fields[i] < fields[j]
})
}

// needsQuote returns true when the string s should be quoted in output.
func needsQuote(s string) bool {
for i := range s {
@@ -352,19 +394,23 @@ func consoleDefaultPartsOrder() []string {
}
}

func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
func consoleDefaultFormatTimestamp(timeFormat string, location *time.Location, noColor bool) Formatter {
if timeFormat == "" {
timeFormat = consoleDefaultTimeFormat
}
if location == nil {
location = time.Local
}

return func(i interface{}) string {
t := "<nil>"
switch tt := i.(type) {
case string:
ts, err := time.ParseInLocation(TimeFieldFormat, tt, time.Local)
ts, err := time.ParseInLocation(TimeFieldFormat, tt, location)
if err != nil {
t = tt
} else {
t = ts.Local().Format(timeFormat)
t = ts.In(location).Format(timeFormat)
}
case json.Number:
i, err := tt.Int64()
@@ -385,32 +431,37 @@ func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter {
}

ts := time.Unix(sec, nsec)
t = ts.Format(timeFormat)
t = ts.In(location).Format(timeFormat)
}
}
return colorize(t, colorDarkGray, noColor)
}
}

func stripLevel(ll string) string {
if len(ll) == 0 {
return unknownLevel
}
if len(ll) > 3 {
ll = ll[:3]
}
return strings.ToUpper(ll)
}

func consoleDefaultFormatLevel(noColor bool) Formatter {
return func(i interface{}) string {
var l string
if ll, ok := i.(string); ok {
level, _ := ParseLevel(ll)
fl, ok := FormattedLevels[level]
if ok {
l = colorize(fl, LevelColors[level], noColor)
} else {
l = strings.ToUpper(ll)[0:3]
}
} else {
if i == nil {
l = "???"
} else {
l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
return colorize(fl, LevelColors[level], noColor)
}
return stripLevel(ll)
}
return l
if i == nil {
return unknownLevel
}
return stripLevel(fmt.Sprintf("%s", i))
}
}

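Note (not part of the vendored diff): a minimal usage sketch of the ConsoleWriter options touched above, assuming the zerolog version vendored by this commit. The TimeLocation, TimeFormat and FieldsOrder names come directly from the diff; everything else is illustrative.

package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	w := zerolog.ConsoleWriter{
		Out:          os.Stdout,
		TimeFormat:   time.RFC3339,
		TimeLocation: time.UTC,                    // render timestamps in UTC instead of time.Local
		FieldsOrder:  []string{"component", "id"}, // these fields are printed first, in this order
	}
	log := zerolog.New(w).With().Timestamp().Logger()
	log.Info().Str("id", "42").Str("component", "auth").Msg("hello")
}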
22
vendor/github.com/rs/zerolog/context.go
generated
vendored
@@ -325,25 +325,25 @@ func (c Context) Uints64(key string, i []uint64) Context {

// Float32 adds the field key with f as a float32 to the logger context.
func (c Context) Float32(key string, f float32) Context {
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}

// Floats32 adds the field key with f as a []float32 to the logger context.
func (c Context) Floats32(key string, f []float32) Context {
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}

// Float64 adds the field key with f as a float64 to the logger context.
func (c Context) Float64(key string, f float64) Context {
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}

// Floats64 adds the field key with f as a []float64 to the logger context.
func (c Context) Floats64(key string, f []float64) Context {
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f, FloatingPointPrecision)
return c
}

@@ -365,13 +365,13 @@ func (c Context) Timestamp() Context {
return c
}

// Time adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Time(key string, t time.Time) Context {
c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
}

// Times adds the field key with t formated as string using zerolog.TimeFieldFormat.
// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat.
func (c Context) Times(key string, t []time.Time) Context {
c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
return c
@@ -379,13 +379,13 @@ func (c Context) Times(key string, t []time.Time) Context {

// Dur adds the fields key with d divided by unit and stored as a float.
func (c Context) Dur(key string, d time.Duration) Context {
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return c
}

// Durs adds the fields key with d divided by unit and stored as a float.
func (c Context) Durs(key string, d []time.Duration) Context {
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return c
}

@@ -409,6 +409,12 @@ func (c Context) Any(key string, i interface{}) Context {
return c.Interface(key, i)
}

// Reset removes all the context fields.
func (c Context) Reset() Context {
c.l.context = enc.AppendBeginMarker(make([]byte, 0, 500))
return c
}

type callerHook struct {
callerSkipFrameCount int
}

12
vendor/github.com/rs/zerolog/encoder.go
generated
vendored
@@ -13,13 +13,13 @@ type encoder interface {
AppendBool(dst []byte, val bool) []byte
AppendBools(dst []byte, vals []bool) []byte
AppendBytes(dst, s []byte) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte
AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte
AppendEndMarker(dst []byte) []byte
AppendFloat32(dst []byte, val float32) []byte
AppendFloat64(dst []byte, val float64) []byte
AppendFloats32(dst []byte, vals []float32) []byte
AppendFloats64(dst []byte, vals []float64) []byte
AppendFloat32(dst []byte, val float32, precision int) []byte
AppendFloat64(dst []byte, val float64, precision int) []byte
AppendFloats32(dst []byte, vals []float32, precision int) []byte
AppendFloats64(dst []byte, vals []float64, precision int) []byte
AppendHex(dst, s []byte) []byte
AppendIPAddr(dst []byte, ip net.IP) []byte
AppendIPPrefix(dst []byte, pfx net.IPNet) []byte

14
vendor/github.com/rs/zerolog/event.go
generated
vendored
@@ -644,7 +644,7 @@ func (e *Event) Float32(key string, f float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}

@@ -653,7 +653,7 @@ func (e *Event) Floats32(key string, f []float32) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}

@@ -662,7 +662,7 @@ func (e *Event) Float64(key string, f float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}

@@ -671,7 +671,7 @@ func (e *Event) Floats64(key string, f []float64) *Event {
if e == nil {
return e
}
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f, FloatingPointPrecision)
return e
}

@@ -713,7 +713,7 @@ func (e *Event) Dur(key string, d time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}

@@ -724,7 +724,7 @@ func (e *Event) Durs(key string, d []time.Duration) *Event {
if e == nil {
return e
}
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}

@@ -739,7 +739,7 @@ func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
if t.After(start) {
d = t.Sub(start)
}
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
return e
}

18
vendor/github.com/rs/zerolog/fields.go
generated
vendored
@@ -139,13 +139,13 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
case uint64:
dst = enc.AppendUint64(dst, val)
case float32:
dst = enc.AppendFloat32(dst, val)
dst = enc.AppendFloat32(dst, val, FloatingPointPrecision)
case float64:
dst = enc.AppendFloat64(dst, val)
dst = enc.AppendFloat64(dst, val, FloatingPointPrecision)
case time.Time:
dst = enc.AppendTime(dst, val, TimeFieldFormat)
case time.Duration:
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
case *string:
if val != nil {
dst = enc.AppendString(dst, *val)
@@ -220,13 +220,13 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
}
case *float32:
if val != nil {
dst = enc.AppendFloat32(dst, *val)
dst = enc.AppendFloat32(dst, *val, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
case *float64:
if val != nil {
dst = enc.AppendFloat64(dst, *val)
dst = enc.AppendFloat64(dst, *val, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
@@ -238,7 +238,7 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
}
case *time.Duration:
if val != nil {
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
} else {
dst = enc.AppendNil(dst)
}
@@ -267,13 +267,13 @@ func appendFieldList(dst []byte, kvList []interface{}, stack bool) []byte {
case []uint64:
dst = enc.AppendUints64(dst, val)
case []float32:
dst = enc.AppendFloats32(dst, val)
dst = enc.AppendFloats32(dst, val, FloatingPointPrecision)
case []float64:
dst = enc.AppendFloats64(dst, val)
dst = enc.AppendFloats64(dst, val, FloatingPointPrecision)
case []time.Time:
dst = enc.AppendTimes(dst, val, TimeFieldFormat)
case []time.Duration:
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger, FloatingPointPrecision)
case nil:
dst = enc.AppendNil(dst)
case net.IP:

24
vendor/github.com/rs/zerolog/globals.go
generated
vendored
@@ -1,6 +1,7 @@
package zerolog

import (
"bytes"
"encoding/json"
"strconv"
"sync/atomic"
@@ -81,8 +82,22 @@ var (
}

// InterfaceMarshalFunc allows customization of interface marshaling.
// Default: "encoding/json.Marshal"
InterfaceMarshalFunc = json.Marshal
// Default: "encoding/json.Marshal" with disabled HTML escaping
InterfaceMarshalFunc = func(v interface{}) ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
encoder.SetEscapeHTML(false)
err := encoder.Encode(v)
if err != nil {
return nil, err
}
b := buf.Bytes()
if len(b) > 0 {
// Remove trailing \n which is added by Encode.
return b[:len(b)-1], nil
}
return b, nil
}

// TimeFieldFormat defines the time format of the Time field type. If set to
// TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX
@@ -136,6 +151,11 @@ var (
// TriggerLevelWriterBufferReuseLimit is a limit in bytes that a buffer is dropped
// from the TriggerLevelWriter buffer pool if the buffer grows above the limit.
TriggerLevelWriterBufferReuseLimit = 64 * 1024

// FloatingPointPrecision, if set to a value other than -1, controls the number
// of digits when formatting float numbers in JSON. See strconv.FormatFloat for
// more details.
FloatingPointPrecision = -1
)

var (

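Note (not part of the vendored diff): a minimal sketch of the FloatingPointPrecision global introduced above, assuming the zerolog version vendored by this commit. The default of -1 keeps the shortest representation; any other value is passed straight to strconv.FormatFloat as the digit count.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	log := zerolog.New(os.Stdout)

	// Default (-1): shortest representation, e.g. "ratio":0.123456789
	log.Info().Float64("ratio", 0.123456789).Msg("default precision")

	// Fixed precision: floats are formatted with 3 digits, e.g. "ratio":0.123
	zerolog.FloatingPointPrecision = 3
	log.Info().Float64("ratio", 0.123456789).Msg("three digits")
}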
2
vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go
generated
vendored
@@ -95,7 +95,7 @@ func decodeFloat(src *bufio.Reader) (float64, int) {

switch minor {
case additionalTypeFloat16:
panic(fmt.Errorf("float16 is not suppported in decodeFloat"))
panic(fmt.Errorf("float16 is not supported in decodeFloat"))

case additionalTypeFloat32:
pb := readNBytes(src, 4)

10
vendor/github.com/rs/zerolog/internal/cbor/time.go
generated
vendored
@@ -29,7 +29,7 @@ func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte {
nanos := t.Nanosecond()
var val float64
val = float64(secs)*1.0 + float64(nanos)*1e-9
return e.AppendFloat64(dst, val)
return e.AppendFloat64(dst, val, -1)
}

// AppendTime encodes and adds a timestamp to the dst byte array.
@@ -64,17 +64,17 @@ func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte
// AppendDuration encodes and adds a duration to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, unused int) []byte {
if useInt {
return e.AppendInt64(dst, int64(d/unit))
}
return e.AppendFloat64(dst, float64(d)/float64(unit))
return e.AppendFloat64(dst, float64(d)/float64(unit), unused)
}

// AppendDurations encodes and adds an array of durations to the dst byte array.
// useInt field indicates whether to store the duration as seconds (integer) or
// as seconds+nanoseconds (float).
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@@ -87,7 +87,7 @@ func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Dur
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, d := range vals {
dst = e.AppendDuration(dst, d, unit, useInt)
dst = e.AppendDuration(dst, d, unit, useInt, unused)
}
return dst
}

12
vendor/github.com/rs/zerolog/internal/cbor/types.go
generated
vendored
@@ -352,7 +352,7 @@ func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
}

// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
func (Encoder) AppendFloat32(dst []byte, val float32, unused int) []byte {
switch {
case math.IsNaN(float64(val)):
return append(dst, "\xfa\x7f\xc0\x00\x00"...)
@@ -372,7 +372,7 @@ func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
}

// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array.
func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
func (e Encoder) AppendFloats32(dst []byte, vals []float32, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@@ -385,13 +385,13 @@ func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat32(dst, v)
dst = e.AppendFloat32(dst, v, unused)
}
return dst
}

// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
func (Encoder) AppendFloat64(dst []byte, val float64, unused int) []byte {
switch {
case math.IsNaN(val):
return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
@@ -412,7 +412,7 @@ func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
}

// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
func (e Encoder) AppendFloats64(dst []byte, vals []float64, unused int) []byte {
major := majorTypeArray
l := len(vals)
if l == 0 {
@@ -425,7 +425,7 @@ func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
dst = appendCborTypePrefix(dst, major, uint64(l))
}
for _, v := range vals {
dst = e.AppendFloat64(dst, v)
dst = e.AppendFloat64(dst, v, unused)
}
return dst
}

10
vendor/github.com/rs/zerolog/internal/json/time.go
generated
vendored
@@ -88,24 +88,24 @@ func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte {

// AppendDuration formats the input duration with the given unit & format
// and appends the encoded string to the input byte slice.
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool, precision int) []byte {
if useInt {
return strconv.AppendInt(dst, int64(d/unit), 10)
}
return e.AppendFloat64(dst, float64(d)/float64(unit))
return e.AppendFloat64(dst, float64(d)/float64(unit), precision)
}

// AppendDurations formats the input durations with the given unit & format
// and appends the encoded string list to the input byte slice.
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = e.AppendDuration(dst, vals[0], unit, useInt)
dst = e.AppendDuration(dst, vals[0], unit, useInt, precision)
if len(vals) > 1 {
for _, d := range vals[1:] {
dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
dst = e.AppendDuration(append(dst, ','), d, unit, useInt, precision)
}
}
dst = append(dst, ']')

45
vendor/github.com/rs/zerolog/internal/json/types.go
generated
vendored
@@ -299,7 +299,7 @@ func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
return dst
}

func appendFloat(dst []byte, val float64, bitSize int) []byte {
func appendFloat(dst []byte, val float64, bitSize, precision int) []byte {
// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
// with an error, but a logging library wants the data to get through so we
// make a tradeoff and store those types as string.
@@ -311,26 +311,47 @@ func appendFloat(dst []byte, val float64, bitSize int) []byte {
case math.IsInf(val, -1):
return append(dst, `"-Inf"`...)
}
return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
// convert as if by es6 number to string conversion
// see also https://cs.opensource.google/go/go/+/refs/tags/go1.20.3:src/encoding/json/encode.go;l=573
strFmt := byte('f')
// If precision is set to a value other than -1, we always just format the float using that precision.
if precision == -1 {
// Use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs := math.Abs(val); abs != 0 {
if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
strFmt = 'e'
}
}
}
dst = strconv.AppendFloat(dst, val, strFmt, precision, bitSize)
if strFmt == 'e' {
// Clean up e-09 to e-9
n := len(dst)
if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
dst[n-2] = dst[n-1]
dst = dst[:n-1]
}
}
return dst
}

// AppendFloat32 converts the input float32 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
return appendFloat(dst, float64(val), 32)
func (Encoder) AppendFloat32(dst []byte, val float32, precision int) []byte {
return appendFloat(dst, float64(val), 32, precision)
}

// AppendFloats32 encodes the input float32s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
func (Encoder) AppendFloats32(dst []byte, vals []float32, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, float64(vals[0]), 32)
dst = appendFloat(dst, float64(vals[0]), 32, precision)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), float64(val), 32)
dst = appendFloat(append(dst, ','), float64(val), 32, precision)
}
}
dst = append(dst, ']')
@@ -339,21 +360,21 @@ func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {

// AppendFloat64 converts the input float64 to a string and
// appends the encoded string to the input byte slice.
func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
return appendFloat(dst, val, 64)
func (Encoder) AppendFloat64(dst []byte, val float64, precision int) []byte {
return appendFloat(dst, val, 64, precision)
}

// AppendFloats64 encodes the input float64s to json and
// appends the encoded string list to the input byte slice.
func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
func (Encoder) AppendFloats64(dst []byte, vals []float64, precision int) []byte {
if len(vals) == 0 {
return append(dst, '[', ']')
}
dst = append(dst, '[')
dst = appendFloat(dst, vals[0], 64)
dst = appendFloat(dst, vals[0], 64, precision)
if len(vals) > 1 {
for _, val := range vals[1:] {
dst = appendFloat(append(dst, ','), val, 64)
dst = appendFloat(append(dst, ','), val, 64, precision)
}
}
dst = append(dst, ']')

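Note (not part of the vendored diff): a standalone sketch of the format selection implemented by appendFloat above for float64 values when no fixed precision is set. Values with magnitude in [1e-6, 1e21) keep the plain 'f' form; everything else switches to exponent notation, mirroring encoding/json. The helper name is illustrative only.

package main

import (
	"fmt"
	"math"
	"strconv"
)

// formatLikeZerolog mirrors the 'f' vs 'e' cutoff used by appendFloat above.
func formatLikeZerolog(val float64) string {
	strFmt := byte('f')
	if abs := math.Abs(val); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		strFmt = 'e' // very small or very large values use exponent notation
	}
	return strconv.FormatFloat(val, strFmt, -1, 64)
}

func main() {
	fmt.Println(formatLikeZerolog(0.000123)) // 0.000123
	fmt.Println(formatLikeZerolog(1e-7))     // 1e-07 (the real code then rewrites e-07 to e-7)
}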
2
vendor/github.com/rs/zerolog/log.go
generated
vendored
@@ -24,7 +24,7 @@
//
// Sub-loggers let you chain loggers with additional context:
//
// sublogger := log.With().Str("component": "foo").Logger()
// sublogger := log.With().Str("component", "foo").Logger()
// sublogger.Info().Msg("hello world")
// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
//

2
vendor/github.com/rs/zerolog/sampler.go
generated
vendored
@@ -84,7 +84,7 @@ func (s *BurstSampler) Sample(lvl Level) bool {
}

func (s *BurstSampler) inc() uint32 {
now := time.Now().UnixNano()
now := TimestampFunc().UnixNano()
resetAt := atomic.LoadInt64(&s.resetAt)
var c uint32
if now > resetAt {

5
vendor/github.com/yuin/goldmark/extension/strikethrough.go
generated
vendored
@@ -46,10 +46,11 @@ func (s *strikethroughParser) Trigger() []byte {
func (s *strikethroughParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
before := block.PrecendingCharacter()
line, segment := block.PeekLine()
node := parser.ScanDelimiter(line, before, 2, defaultStrikethroughDelimiterProcessor)
if node == nil {
node := parser.ScanDelimiter(line, before, 1, defaultStrikethroughDelimiterProcessor)
if node == nil || node.OriginalLength > 2 || before == '~' {
return nil
}

node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
block.Advance(node.OriginalLength)
pc.PushDelimiter(node)

6
vendor/github.com/yuin/goldmark/extension/table.go
generated
vendored
@@ -492,7 +492,7 @@ func (r *TableHTMLRenderer) renderTableCell(
tag = "th"
}
if entering {
fmt.Fprintf(w, "<%s", tag)
_, _ = fmt.Fprintf(w, "<%s", tag)
if n.Alignment != ast.AlignNone {
amethod := r.TableConfig.TableCellAlignMethod
if amethod == TableCellAlignDefault {
@@ -505,7 +505,7 @@ func (r *TableHTMLRenderer) renderTableCell(
switch amethod {
case TableCellAlignAttribute:
if _, ok := n.AttributeString("align"); !ok { // Skip align render if overridden
fmt.Fprintf(w, ` align="%s"`, n.Alignment.String())
_, _ = fmt.Fprintf(w, ` align="%s"`, n.Alignment.String())
}
case TableCellAlignStyle:
v, ok := n.AttributeString("style")
@@ -528,7 +528,7 @@ func (r *TableHTMLRenderer) renderTableCell(
}
_ = w.WriteByte('>')
} else {
fmt.Fprintf(w, "</%s>\n", tag)
_, _ = fmt.Fprintf(w, "</%s>\n", tag)
}
return gast.WalkContinue, nil
}

3
vendor/github.com/yuin/goldmark/markdown.go
generated
vendored
@@ -2,12 +2,13 @@
package goldmark

import (
"io"

"github.com/yuin/goldmark/parser"
"github.com/yuin/goldmark/renderer"
"github.com/yuin/goldmark/renderer/html"
"github.com/yuin/goldmark/text"
"github.com/yuin/goldmark/util"
"io"
)

// DefaultParser returns a new Parser that is configured by default values.

53
vendor/github.com/yuin/goldmark/parser/link.go
generated
vendored
@@ -126,13 +126,13 @@ func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.N
if line[0] == '!' {
if len(line) > 1 && line[1] == '[' {
block.Advance(1)
pc.Set(linkBottom, pc.LastDelimiter())
pushLinkBottom(pc)
return processLinkLabelOpen(block, segment.Start+1, true, pc)
}
return nil
}
if line[0] == '[' {
pc.Set(linkBottom, pc.LastDelimiter())
pushLinkBottom(pc)
return processLinkLabelOpen(block, segment.Start, false, pc)
}

@@ -143,6 +143,7 @@ func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.N
}
last := tlist.(*linkLabelState).Last
if last == nil {
_ = popLinkBottom(pc)
return nil
}
block.Advance(1)
@@ -151,11 +152,13 @@ func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.N
// > A link label can have at most 999 characters inside the square brackets.
if linkLabelStateLength(tlist.(*linkLabelState)) > 998 {
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
_ = popLinkBottom(pc)
return nil
}

if !last.IsImage && s.containsLink(last) { // a link in a link text is not allowed
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
_ = popLinkBottom(pc)
return nil
}

@@ -169,6 +172,7 @@ func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.N
link, hasValue = s.parseReferenceLink(parent, last, block, pc)
if link == nil && hasValue {
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
_ = popLinkBottom(pc)
return nil
}
}
@@ -182,12 +186,14 @@ func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.N
// > A link label can have at most 999 characters inside the square brackets.
if len(maybeReference) > 999 {
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
_ = popLinkBottom(pc)
return nil
}

ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
if !ok {
ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
_ = popLinkBottom(pc)
return nil
}
link = ast.NewLink()
@@ -230,11 +236,7 @@ func processLinkLabelOpen(block text.Reader, pos int, isImage bool, pc Context)
}

func (s *linkParser) processLinkLabel(parent ast.Node, link *ast.Link, last *linkLabelState, pc Context) {
var bottom ast.Node
if v := pc.Get(linkBottom); v != nil {
bottom = v.(ast.Node)
}
pc.Set(linkBottom, nil)
bottom := popLinkBottom(pc)
ProcessDelimiters(bottom, pc)
for c := last.NextSibling(); c != nil; {
next := c.NextSibling()
@@ -395,6 +397,43 @@ func parseLinkTitle(block text.Reader) ([]byte, bool) {
return nil, false
}

func pushLinkBottom(pc Context) {
bottoms := pc.Get(linkBottom)
b := pc.LastDelimiter()
if bottoms == nil {
pc.Set(linkBottom, b)
return
}
if s, ok := bottoms.([]ast.Node); ok {
pc.Set(linkBottom, append(s, b))
return
}
pc.Set(linkBottom, []ast.Node{bottoms.(ast.Node), b})
}

func popLinkBottom(pc Context) ast.Node {
bottoms := pc.Get(linkBottom)
if bottoms == nil {
return nil
}
if v, ok := bottoms.(ast.Node); ok {
pc.Set(linkBottom, nil)
return v
}
s := bottoms.([]ast.Node)
v := s[len(s)-1]
n := s[0 : len(s)-1]
switch len(n) {
case 0:
pc.Set(linkBottom, nil)
case 1:
pc.Set(linkBottom, n[0])
default:
pc.Set(linkBottom, s[0:len(s)-1])
}
return v
}

func (s *linkParser) CloseBlock(parent ast.Node, block text.Reader, pc Context) {
pc.Set(linkBottom, nil)
tlist := pc.Get(linkLabelStateKey)

38
vendor/github.com/yuin/goldmark/renderer/html/html.go
generated
vendored
@@ -445,7 +445,7 @@ func (r *Renderer) renderList(w util.BufWriter, source []byte, node ast.Node, en
_ = w.WriteByte('<')
_, _ = w.WriteString(tag)
if n.IsOrdered() && n.Start != 1 {
fmt.Fprintf(w, " start=\"%d\"", n.Start)
_, _ = fmt.Fprintf(w, " start=\"%d\"", n.Start)
}
if n.Attributes() != nil {
RenderAttributes(w, n, ListAttributeFilter)
@@ -680,7 +680,7 @@ func (r *Renderer) renderImage(w util.BufWriter, source []byte, node ast.Node, e
_, _ = w.Write(util.EscapeHTML(util.URLEscape(n.Destination, true)))
}
_, _ = w.WriteString(`" alt="`)
_, _ = w.Write(nodeToHTMLText(n, source))
r.renderAttribute(w, source, n)
_ = w.WriteByte('"')
if n.Title != nil {
_, _ = w.WriteString(` title="`)
@@ -770,6 +770,23 @@ func (r *Renderer) renderString(w util.BufWriter, source []byte, node ast.Node,
return ast.WalkContinue, nil
}

func (r *Renderer) renderAttribute(w util.BufWriter, source []byte, n ast.Node) {
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
if s, ok := c.(*ast.String); ok {
_, _ = r.renderString(w, source, s, true)
} else if t, ok := c.(*ast.Text); ok {
_, _ = r.renderText(w, source, t, true)
} else if !c.HasChildren() {
r.Writer.Write(w, c.Text(source))
if t, ok := c.(*ast.Text); ok && t.SoftLineBreak() {
_ = w.WriteByte('\n')
}
} else {
r.renderAttribute(w, source, c)
}
}
}

var dataPrefix = []byte("data-")

// RenderAttributes renders given node's attributes.
@@ -1007,20 +1024,3 @@ func IsDangerousURL(url []byte) bool {
return hasPrefix(url, bJs) || hasPrefix(url, bVb) ||
hasPrefix(url, bFile) || hasPrefix(url, bData)
}

func nodeToHTMLText(n ast.Node, source []byte) []byte {
var buf bytes.Buffer
for c := n.FirstChild(); c != nil; c = c.NextSibling() {
if s, ok := c.(*ast.String); ok && s.IsCode() {
buf.Write(s.Text(source))
} else if !c.HasChildren() {
buf.Write(util.EscapeHTML(c.Text(source)))
if t, ok := c.(*ast.Text); ok && t.SoftLineBreak() {
buf.WriteByte('\n')
}
} else {
buf.Write(nodeToHTMLText(c, source))
}
}
return buf.Bytes()
}
