automatically ignore known forwarded addresses, fixes #64

Aine
2023-09-18 12:35:37 +03:00
parent e90925eceb
commit 60b4386dd8
187 changed files with 4070 additions and 2667 deletions

View File

@@ -24,7 +24,7 @@ Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog)
* [Sampling](#log-sampling)
* [Hooks](#hooks)
* [Contextual fields](#contextual-logging)
* `context.Context` integration
* [`context.Context` integration](#contextcontext-integration)
* [Integration with `net/http`](#integration-with-nethttp)
* [JSON and CBOR encoding formats](#binary-encoding)
* [Pretty logging for development](#pretty-logging)
@@ -499,7 +499,7 @@ log.Ctx(ctx).Info().Msg("hello world")
### Set as standard logger output
```go
log := zerolog.New(os.Stdout).With().
stdlog := zerolog.New(os.Stdout).With().
Str("foo", "bar").
Logger()
@@ -511,6 +511,58 @@ stdlog.Print("hello world")
// Output: {"foo":"bar","message":"hello world"}
```
### context.Context integration
Go contexts are commonly passed throughout Go code, and this can help you pass
your Logger into places it might otherwise be hard to inject. The `Logger`
instance may be attached to a Go context (`context.Context`) using
`Logger.WithContext(ctx)` and extracted from it using `zerolog.Ctx(ctx)`.
For example:
```go
func f() {
	logger := zerolog.New(os.Stdout)
	ctx := context.Background()

	// Attach the Logger to the context.Context
	ctx = logger.WithContext(ctx)
	someFunc(ctx)
}

func someFunc(ctx context.Context) {
	// Get Logger from the Go context. If it's nil, then
	// `zerolog.DefaultContextLogger` is returned; if
	// `DefaultContextLogger` is nil, then a disabled logger is returned.
	logger := zerolog.Ctx(ctx)
	logger.Info().Msg("Hello")
}
```
A second form of `context.Context` integration allows you to pass the current
context.Context into the logged event, and retrieve it from hooks. This can be
useful to log trace and span IDs or other information stored in the Go context,
and facilitates the unification of logging and tracing in some systems:
```go
type TracingHook struct{}

func (h TracingHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
	ctx := e.GetCtx()
	spanId := getSpanIdFromContext(ctx) // as per your tracing framework
	e.Str("span-id", spanId)
}

func f() {
	// Set up the logger
	logger := zerolog.New(os.Stdout)
	logger = logger.Hook(TracingHook{})
	ctx := context.Background()
	// Use the Ctx function to make the context available to the hook
	logger.Info().Ctx(ctx).Msg("Hello")
}
```
### Integration with `net/http`
The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.
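A minimal sketch of that wiring, using only the `hlog.NewHandler` and `hlog.FromRequest` helpers (the port, field names and handler body are illustrative):

```go
package main

import (
	"net/http"
	"os"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/hlog"
)

func main() {
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()

	var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// FromRequest returns the request-scoped logger injected by NewHandler.
		hlog.FromRequest(r).Info().
			Str("path", r.URL.Path).
			Msg("request received")
		w.WriteHeader(http.StatusNoContent)
	})

	// NewHandler attaches the logger to every request's context.
	h = hlog.NewHandler(logger)(h)

	_ = http.ListenAndServe(":8080", h)
}
```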
@@ -703,6 +755,8 @@ Log a static string, without any context or `printf`-style templating:
## Caveats
### Field duplication
Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in final JSON:
```go
@@ -714,3 +768,19 @@ logger.Info().
```
In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt.
### Concurrency safety
Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger:
```go
func handler(w http.ResponseWriter, r *http.Request) {
	// Create a child logger for concurrency safety
	logger := log.Logger.With().Logger()

	// Add context fields, for example User-Agent from HTTP headers
	logger.UpdateContext(func(c zerolog.Context) zerolog.Context {
		...
	})
}
```

View File

@@ -1,6 +1,7 @@
package zerolog
import (
"context"
"fmt"
"io/ioutil"
"math"
@@ -165,6 +166,15 @@ func (c Context) Err(err error) Context {
return c.AnErr(ErrorFieldName, err)
}
// Ctx adds the context.Context to the logger context. The context.Context is
// not rendered in the error message, but is made available for hooks to use.
// A typical use case is to extract tracing information from the
// context.Context.
func (c Context) Ctx(ctx context.Context) Context {
	c.l.ctx = ctx
	return c
}
// Bool adds the field key with val as a bool to the logger context.
func (c Context) Bool(key string, b bool) Context {
c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b)
@@ -329,8 +339,9 @@ func (ts timestampHook) Run(e *Event, level Level, msg string) {
var th = timestampHook{}
// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
// Timestamp adds the current local time to the logger context with the "time" key, formatted using zerolog.TimeFieldFormat.
// To customize the key name, change zerolog.TimestampFieldName.
// To customize the time format, change zerolog.TimeFieldFormat.
//
// NOTE: It won't dedupe the "time" key if the *Context has one already.
func (c Context) Timestamp() Context {
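A small sketch of the customization mentioned above (the key name and format chosen here are arbitrary):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Both knobs are package-level and affect how Timestamp() renders the field.
	zerolog.TimestampFieldName = "ts"
	zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs

	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
	logger.Info().Msg("hello")
	// Emits something like: {"level":"info","ts":1695037337000,"message":"hello"}
}
```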

View File

@@ -24,6 +24,9 @@ func init() {
func appendJSON(dst []byte, j []byte) []byte {
	return cbor.AppendEmbeddedJSON(dst, j)
}

func appendCBOR(dst []byte, c []byte) []byte {
	return cbor.AppendEmbeddedCBOR(dst, c)
}
// decodeIfBinaryToString - converts a binary formatted log msg to a
// JSON formatted String Log message.

View File

@@ -6,6 +6,7 @@ package zerolog
// JSON encoded byte stream.
import (
"encoding/base64"
"github.com/rs/zerolog/internal/json"
)
@@ -25,6 +26,17 @@ func init() {
func appendJSON(dst []byte, j []byte) []byte {
	return append(dst, j...)
}

func appendCBOR(dst []byte, cbor []byte) []byte {
	dst = append(dst, []byte("\"data:application/cbor;base64,")...)
	l := len(dst)
	enc := base64.StdEncoding
	n := enc.EncodedLen(len(cbor))
	for i := 0; i < n; i++ {
		dst = append(dst, '.')
	}
	enc.Encode(dst[l:], cbor)
	return append(dst, '"')
}
func decodeIfBinaryToString(in []byte) string {
return string(in)

View File

@@ -1,6 +1,7 @@
package zerolog
import (
"context"
"fmt"
"net"
"os"
@@ -24,9 +25,10 @@ type Event struct {
w LevelWriter
level Level
done func(msg string)
stack bool // enable error stack trace
ch []Hook // hooks from context
skipFrame int // The number of additional frames to skip when printing the caller.
stack bool // enable error stack trace
ch []Hook // hooks from context
skipFrame int // The number of additional frames to skip when printing the caller.
ctx context.Context // Optional Go context for event
}
func putEvent(e *Event) {
@@ -318,6 +320,18 @@ func (e *Event) RawJSON(key string, b []byte) *Event {
return e
}
// RawCBOR adds already encoded CBOR to the log line under key.
//
// No sanity check is performed on b.
// Note: The full feature set of CBOR is supported, as data will not be mapped
// to JSON but stored as a data URL.
func (e *Event) RawCBOR(key string, b []byte) *Event {
	if e == nil {
		return e
	}
	e.buf = appendCBOR(enc.AppendKey(e.buf, key), b)
	return e
}
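A brief usage sketch (the payload bytes below are a hand-encoded CBOR map, shown only for illustration):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)

	// 0xa1 0x61 0x61 0x01 is the CBOR encoding of {"a": 1}.
	payload := []byte{0xa1, 0x61, 0x61, 0x01}

	// With the default JSON encoder the bytes are rendered as a
	// "data:application/cbor;base64,..." string; with the binary (CBOR)
	// encoder they are embedded natively.
	logger.Info().RawCBOR("payload", payload).Msg("stored")
}
```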
// AnErr adds the field key with serialized err to the *Event context.
// If err is nil, no field is added.
func (e *Event) AnErr(key string, err error) *Event {
@@ -405,6 +419,28 @@ func (e *Event) Stack() *Event {
return e
}
// Ctx adds the Go Context to the *Event context. The context is not rendered
// in the output message, but is available to hooks and to Func() calls via the
// GetCtx() accessor. A typical use case is to extract tracing information from
// the Go context.
func (e *Event) Ctx(ctx context.Context) *Event {
	if e != nil {
		e.ctx = ctx
	}
	return e
}

// GetCtx retrieves the Go context.Context which is optionally stored in the
// Event. This allows Hooks and functions passed to Func() to retrieve values
// which are stored in the context.Context. This can be useful in tracing,
// where span information is commonly propagated in the context.Context.
func (e *Event) GetCtx() context.Context {
	if e == nil || e.ctx == nil {
		return context.Background()
	}
	return e.ctx
}
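A small sketch combining `Ctx` with `Func()`; the context key and request-id value are hypothetical:

```go
package main

import (
	"context"
	"os"

	"github.com/rs/zerolog"
)

type ctxKey string

const requestIDKey ctxKey = "request_id" // hypothetical key, for illustration

func main() {
	logger := zerolog.New(os.Stdout)
	ctx := context.WithValue(context.Background(), requestIDKey, "req-42")

	logger.Info().
		Ctx(ctx).
		Func(func(e *zerolog.Event) {
			// Func callbacks see the same context that hooks do.
			if id, ok := e.GetCtx().Value(requestIDKey).(string); ok {
				e.Str("request_id", id)
			}
		}).
		Msg("processing")
	// Output: {"level":"info","request_id":"req-42","message":"processing"}
}
```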
// Bool adds the field key with val as a bool to the *Event context.
func (e *Event) Bool(key string, b bool) *Event {
if e == nil {

View File

@@ -26,7 +26,8 @@ const (
additionalTypeBreak byte = 31
// Tag Sub-types.
additionalTypeTimestamp byte = 01
additionalTypeTimestamp byte = 01
additionalTypeEmbeddedCBOR byte = 63
// Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml
additionalTypeTagNetworkAddr uint16 = 260

View File

@@ -5,6 +5,7 @@ package cbor
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"math"
@@ -213,6 +214,31 @@ func decodeString(src *bufio.Reader, noQuotes bool) []byte {
}
return append(result, '"')
}
func decodeStringToDataUrl(src *bufio.Reader, mimeType string) []byte {
	pb := readByte(src)
	major := pb & maskOutAdditionalType
	minor := pb & maskOutMajorType
	if major != majorTypeByteString {
		panic(fmt.Errorf("Major type is: %d in decodeString", major))
	}
	length := decodeIntAdditionalType(src, minor)
	l := int(length)
	enc := base64.StdEncoding
	lEnc := enc.EncodedLen(l)
	result := make([]byte, len("\"data:;base64,\"")+len(mimeType)+lEnc)
	dest := result
	u := copy(dest, "\"data:")
	dest = dest[u:]
	u = copy(dest, mimeType)
	dest = dest[u:]
	u = copy(dest, ";base64,")
	dest = dest[u:]
	pbs := readNBytes(src, l)
	enc.Encode(dest, pbs)
	dest = dest[lEnc:]
	dest[0] = '"'
	return result
}
func decodeUTF8String(src *bufio.Reader) []byte {
pb := readByte(src)
@@ -349,6 +375,20 @@ func decodeTagData(src *bufio.Reader) []byte {
switch minor {
case additionalTypeTimestamp:
return decodeTimeStamp(src)
case additionalTypeIntUint8:
val := decodeIntAdditionalType(src, minor)
switch byte(val) {
case additionalTypeEmbeddedCBOR:
pb := readByte(src)
dataMajor := pb & maskOutAdditionalType
if dataMajor != majorTypeByteString {
panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedCBOR", dataMajor))
}
src.UnreadByte()
return decodeStringToDataUrl(src, "application/cbor")
default:
panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
}
// Tag value is larger than 256 (so uint16).
case additionalTypeIntUint16:

View File

@@ -93,3 +93,25 @@ func AppendEmbeddedJSON(dst, s []byte) []byte {
}
return append(dst, s...)
}
// AppendEmbeddedCBOR adds a tag and embeds input CBOR as such.
func AppendEmbeddedCBOR(dst, s []byte) []byte {
	major := majorTypeTags
	minor := additionalTypeEmbeddedCBOR

	// Append the TAG to indicate this is Embedded CBOR.
	dst = append(dst, major|additionalTypeIntUint8)
	dst = append(dst, minor)

	// Append the CBOR Object as Byte String.
	major = majorTypeByteString

	l := len(s)
	if l <= additionalMax {
		lb := byte(l)
		dst = append(dst, major|lb)
	} else {
		dst = appendCborTypePrefix(dst, major, uint64(l))
	}
	return append(dst, s...)
}

27
vendor/github.com/rs/zerolog/log.go generated vendored
View File

@@ -82,8 +82,9 @@
// log.Warn().Msg("")
// // Output: {"level":"warn","severity":"warn"}
//
// # Caveats
//
// Caveats
// Field duplication:
//
// There is no fields deduplication out-of-the-box.
// Using the same key multiple times creates new key in final JSON each time.
@@ -96,9 +97,24 @@
//
// In this case, many consumers will take the last value,
// but this is not guaranteed; check yours if in doubt.
//
// Concurrency safety:
//
// Be careful when calling UpdateContext. It is not concurrency safe. Use the With method to create a child logger:
//
// func handler(w http.ResponseWriter, r *http.Request) {
// // Create a child logger for concurrency safety
// logger := log.Logger.With().Logger()
//
// // Add context fields, for example User-Agent from HTTP headers
// logger.UpdateContext(func(c zerolog.Context) zerolog.Context {
// ...
// })
// }
package zerolog
import (
"context"
"errors"
"fmt"
"io"
@@ -218,6 +234,7 @@ type Logger struct {
context []byte
hooks []Hook
stack bool
ctx context.Context
}
// New creates a root logger with given output writer. If the output writer implements
@@ -275,7 +292,8 @@ func (l Logger) With() Context {
// UpdateContext updates the internal logger's context.
//
// Use this method with caution. If unsure, prefer the With method.
// Caution: This method is not concurrency safe.
// Use the With method to create a child logger before modifying the context from concurrent goroutines.
func (l *Logger) UpdateContext(update func(c Context) Context) {
if l == disabledLogger {
return
@@ -309,7 +327,9 @@ func (l Logger) Sample(s Sampler) Logger {
// Hook returns a logger with the h Hook.
func (l Logger) Hook(h Hook) Logger {
l.hooks = append(l.hooks, h)
newHooks := make([]Hook, len(l.hooks), len(l.hooks)+1)
copy(newHooks, l.hooks)
l.hooks = append(newHooks, h)
return l
}
@@ -453,6 +473,7 @@ func (l *Logger) newEvent(level Level, done func(string)) *Event {
e := newEvent(l.w, level)
e.done = done
e.ch = l.hooks
e.ctx = l.ctx
if level != NoLevel && LevelFieldName != "" {
e.Str(LevelFieldName, LevelFieldMarshalFunc(level))
}

View File

@@ -211,6 +211,7 @@ There are currently the following built-in modifiers:
- `@tostr`: Converts json to a string. Wraps a json string.
- `@fromstr`: Converts a string from json. Unwraps a json string.
- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db).
- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf).
### Modifier arguments

View File

@@ -137,12 +137,21 @@ next major release.*
The `~` (tilde) operator will convert a value to a boolean before comparison.
Supported tilde comparison type are:
```
~true Converts true-ish values to true
~false Converts false-ish and non-existent values to true
~null Converts null and non-existent values to true
~* Converts any existing value to true
```
For example, using the following JSON:
```json
{
"vals": [
{ "a": 1, "b": true },
{ "a": 1, "b": "data" },
{ "a": 2, "b": true },
{ "a": 3, "b": false },
{ "a": 4, "b": "0" },
@@ -157,15 +166,23 @@ For example, using the following JSON:
}
```
You can now query for all true(ish) or false(ish) values:
To query for all true-ish or false-ish values:
```
vals.#(b==~true)#.a >> [1,2,6,7,8]
vals.#(b==~true)#.a >> [2,6,7,8]
vals.#(b==~false)#.a >> [3,4,5,9,10,11]
```
The last value which was non-existent is treated as `false`
To query for null and explicit value existence:
```
vals.#(b==~null)#.a >> [10,11]
vals.#(b==~*)#.a >> [1,2,3,4,5,6,7,8,9,10]
vals.#(b!=~*)#.a >> [11]
```
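A runnable sketch of the queries above, using a trimmed-down version of the `vals` document (the expected results follow the tilde rules described in this section):

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	const json = `{"vals":[{"a":2,"b":true},{"a":3,"b":false},{"a":4,"b":"0"},{"a":10},{"a":11,"b":null}]}`

	fmt.Println(gjson.Get(json, `vals.#(b==~true)#.a`))  // [2]
	fmt.Println(gjson.Get(json, `vals.#(b==~false)#.a`)) // [3,4,10,11]  (false-ish, null and missing)
	fmt.Println(gjson.Get(json, `vals.#(b==~null)#.a`))  // [10,11]      (null or missing)
	fmt.Println(gjson.Get(json, `vals.#(b==~*)#.a`))     // [2,3,4,11]   (any existing value)
}
```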
### Dot vs Pipe
The `.` is standard separator, but it's also possible to use a `|`.
@@ -241,6 +258,7 @@ There are currently the following built-in modifiers:
- `@tostr`: Converts json to a string. Wraps a json string.
- `@fromstr`: Converts a string from json. Unwraps a json string.
- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db).
- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf).
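The new `@dig` modifier searches for a value without its full path; a small sketch, inferred from the `modDig` implementation later in this diff, where the text after `:` is the sub-path to collect:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	const json = `{"name":{"first":"Tom","last":"Anderson"},"friends":[{"first":"James","last":"Murphy"},{"first":"Roger","last":"Craig"}]}`

	// Recursively collect every "last" value, wherever it appears.
	fmt.Println(gjson.Get(json, "@dig:last"))
	// Expected: ["Anderson","Murphy","Craig"]
}
```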
#### Modifier arguments

View File

@@ -645,9 +645,9 @@ func tostr(json string) (raw string, str string) {
// Exists returns true if value exists.
//
// if gjson.Get(json, "name.last").Exists(){
// println("value exists")
// }
// if gjson.Get(json, "name.last").Exists(){
// println("value exists")
// }
func (t Result) Exists() bool {
return t.Type != Null || len(t.Raw) != 0
}
@@ -661,7 +661,6 @@ func (t Result) Exists() bool {
// nil, for JSON null
// map[string]interface{}, for JSON objects
// []interface{}, for JSON arrays
//
func (t Result) Value() interface{} {
if t.Type == String {
return t.Str
@@ -826,19 +825,28 @@ func parseArrayPath(path string) (r arrayPathResult) {
}
// splitQuery takes a query and splits it into three parts:
// path, op, middle, and right.
//
// path, op, middle, and right.
//
// So for this query:
// #(first_name=="Murphy").last
//
// #(first_name=="Murphy").last
//
// Becomes
// first_name # path
// =="Murphy" # middle
// .last # right
//
// first_name # path
// =="Murphy" # middle
// .last # right
//
// Or,
// #(service_roles.#(=="one")).cap
//
// #(service_roles.#(=="one")).cap
//
// Becomes
// service_roles.#(=="one") # path
// # middle
// .cap # right
//
// service_roles.#(=="one") # path
// # middle
// .cap # right
func parseQuery(query string) (
path, op, value, remain string, i int, vesc, ok bool,
) {
@@ -1251,15 +1259,74 @@ func matchLimit(str, pattern string) bool {
return matched
}
func falseish(t Result) bool {
	switch t.Type {
	case Null:
		return true
	case False:
		return true
	case String:
		b, err := strconv.ParseBool(strings.ToLower(t.Str))
		if err != nil {
			return false
		}
		return !b
	case Number:
		return t.Num == 0
	default:
		return false
	}
}

func trueish(t Result) bool {
	switch t.Type {
	case True:
		return true
	case String:
		b, err := strconv.ParseBool(strings.ToLower(t.Str))
		if err != nil {
			return false
		}
		return b
	case Number:
		return t.Num != 0
	default:
		return false
	}
}

func nullish(t Result) bool {
	return t.Type == Null
}
func queryMatches(rp *arrayPathResult, value Result) bool {
rpv := rp.query.value
if len(rpv) > 0 && rpv[0] == '~' {
// convert to bool
rpv = rpv[1:]
if value.Bool() {
value = Result{Type: True}
} else {
value = Result{Type: False}
if len(rpv) > 0 {
if rpv[0] == '~' {
// convert to bool
rpv = rpv[1:]
var ish, ok bool
switch rpv {
case "*":
ish, ok = value.Exists(), true
case "null":
ish, ok = nullish(value), true
case "true":
ish, ok = trueish(value), true
case "false":
ish, ok = falseish(value), true
}
if ok {
rpv = "true"
if ish {
value = Result{Type: True}
} else {
value = Result{Type: False}
}
} else {
rpv = ""
value = Result{}
}
}
}
if !value.Exists() {
@@ -1918,23 +1985,23 @@ type parseContext struct {
// the '#' character.
// The dot and wildcard character can be escaped with '\'.
//
// {
// "name": {"first": "Tom", "last": "Anderson"},
// "age":37,
// "children": ["Sara","Alex","Jack"],
// "friends": [
// {"first": "James", "last": "Murphy"},
// {"first": "Roger", "last": "Craig"}
// ]
// }
// "name.last" >> "Anderson"
// "age" >> 37
// "children" >> ["Sara","Alex","Jack"]
// "children.#" >> 3
// "children.1" >> "Alex"
// "child*.2" >> "Jack"
// "c?ildren.0" >> "Sara"
// "friends.#.first" >> ["James","Roger"]
// {
// "name": {"first": "Tom", "last": "Anderson"},
// "age":37,
// "children": ["Sara","Alex","Jack"],
// "friends": [
// {"first": "James", "last": "Murphy"},
// {"first": "Roger", "last": "Craig"}
// ]
// }
// "name.last" >> "Anderson"
// "age" >> 37
// "children" >> ["Sara","Alex","Jack"]
// "children.#" >> 3
// "children.1" >> "Alex"
// "child*.2" >> "Jack"
// "c?ildren.0" >> "Sara"
// "friends.#.first" >> ["James","Roger"]
//
// This function expects that the json is well-formed, and does not validate.
// Invalid json will not panic, but it may return back unexpected results.
@@ -2126,8 +2193,7 @@ func unescape(json string) string {
// The caseSensitive paramater is used when the tokens are Strings.
// The order when comparing two different type is:
//
// Null < False < Number < String < True < JSON
//
// Null < False < Number < String < True < JSON
func (t Result) Less(token Result, caseSensitive bool) bool {
if t.Type < token.Type {
return true
@@ -2556,11 +2622,10 @@ func validnull(data []byte, i int) (outi int, ok bool) {
// Valid returns true if the input is valid json.
//
// if !gjson.Valid(json) {
// return errors.New("invalid json")
// }
// value := gjson.Get(json, "name.last")
//
// if !gjson.Valid(json) {
// return errors.New("invalid json")
// }
// value := gjson.Get(json, "name.last")
func Valid(json string) bool {
_, ok := validpayload(stringBytes(json), 0)
return ok
@@ -2568,13 +2633,12 @@ func Valid(json string) bool {
// ValidBytes returns true if the input is valid json.
//
// if !gjson.Valid(json) {
// return errors.New("invalid json")
// }
// value := gjson.Get(json, "name.last")
// if !gjson.Valid(json) {
// return errors.New("invalid json")
// }
// value := gjson.Get(json, "name.last")
//
// If working with bytes, this method preferred over ValidBytes(string(data))
//
func ValidBytes(json []byte) bool {
_, ok := validpayload(json, 0)
return ok
@@ -2690,6 +2754,7 @@ func execModifier(json, path string) (pathOut, res string, ok bool) {
var parsedArgs bool
switch pathOut[0] {
case '{', '[', '"':
// json arg
res := Parse(pathOut)
if res.Exists() {
args = squash(pathOut)
@@ -2698,14 +2763,20 @@ func execModifier(json, path string) (pathOut, res string, ok bool) {
}
}
if !parsedArgs {
idx := strings.IndexByte(pathOut, '|')
if idx == -1 {
args = pathOut
pathOut = ""
} else {
args = pathOut[:idx]
pathOut = pathOut[idx:]
// simple arg
i := 0
for ; i < len(pathOut); i++ {
if pathOut[i] == '|' {
break
}
switch pathOut[i] {
case '{', '[', '"', '(':
s := squash(pathOut[i:])
i += len(s) - 1
}
}
args = pathOut[:i]
pathOut = pathOut[i:]
}
}
return pathOut, fn(json, args), true
@@ -2725,19 +2796,24 @@ func unwrap(json string) string {
// DisableModifiers will disable the modifier syntax
var DisableModifiers = false
var modifiers = map[string]func(json, arg string) string{
"pretty": modPretty,
"ugly": modUgly,
"reverse": modReverse,
"this": modThis,
"flatten": modFlatten,
"join": modJoin,
"valid": modValid,
"keys": modKeys,
"values": modValues,
"tostr": modToStr,
"fromstr": modFromStr,
"group": modGroup,
var modifiers map[string]func(json, arg string) string
func init() {
modifiers = map[string]func(json, arg string) string{
"pretty": modPretty,
"ugly": modUgly,
"reverse": modReverse,
"this": modThis,
"flatten": modFlatten,
"join": modJoin,
"valid": modValid,
"keys": modKeys,
"values": modValues,
"tostr": modToStr,
"fromstr": modFromStr,
"group": modGroup,
"dig": modDig,
}
}
// AddModifier binds a custom modifier command to the GJSON syntax.
@@ -2848,9 +2924,13 @@ func modReverse(json, arg string) string {
}
// @flatten an array with child arrays.
// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,[6,7]]
//
// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,[6,7]]
//
// The {"deep":true} arg can be provide for deep flattening.
// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,6,7]
//
// [1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,6,7]
//
// The original json is returned when the json is not an array.
func modFlatten(json, arg string) string {
res := Parse(json)
@@ -2895,7 +2975,8 @@ func modFlatten(json, arg string) string {
}
// @keys extracts the keys from an object.
// {"first":"Tom","last":"Smith"} -> ["first","last"]
//
// {"first":"Tom","last":"Smith"} -> ["first","last"]
func modKeys(json, arg string) string {
v := Parse(json)
if !v.Exists() {
@@ -2922,7 +3003,8 @@ func modKeys(json, arg string) string {
}
// @values extracts the values from an object.
// {"first":"Tom","last":"Smith"} -> ["Tom","Smith"]
//
// {"first":"Tom","last":"Smith"} -> ["Tom","Smith"]
func modValues(json, arg string) string {
v := Parse(json)
if !v.Exists() {
@@ -2947,11 +3029,17 @@ func modValues(json, arg string) string {
}
// @join multiple objects into a single object.
// [{"first":"Tom"},{"last":"Smith"}] -> {"first","Tom","last":"Smith"}
//
// [{"first":"Tom"},{"last":"Smith"}] -> {"first","Tom","last":"Smith"}
//
// The arg can be "true" to specify that duplicate keys should be preserved.
// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":37,"age":41}
//
// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":37,"age":41}
//
// Without preserved keys:
// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":41}
//
// [{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":41}
//
// The original json is returned when the json is not an object.
func modJoin(json, arg string) string {
res := Parse(json)
@@ -3024,7 +3112,8 @@ func modValid(json, arg string) string {
}
// @fromstr converts a string to json
// "{\"id\":1023,\"name\":\"alert\"}" -> {"id":1023,"name":"alert"}
//
// "{\"id\":1023,\"name\":\"alert\"}" -> {"id":1023,"name":"alert"}
func modFromStr(json, arg string) string {
if !Valid(json) {
return ""
@@ -3033,7 +3122,8 @@ func modFromStr(json, arg string) string {
}
// @tostr converts a string to json
// {"id":1023,"name":"alert"} -> "{\"id\":1023,\"name\":\"alert\"}"
//
// {"id":1023,"name":"alert"} -> "{\"id\":1023,\"name\":\"alert\"}"
func modToStr(str, arg string) string {
return string(AppendJSONString(nil, str))
}
@@ -3210,11 +3300,11 @@ func revSquash(json string) string {
// Paths returns the original GJSON paths for a Result where the Result came
// from a simple query path that returns an array, like:
//
// gjson.Get(json, "friends.#.first")
// gjson.Get(json, "friends.#.first")
//
// The returned value will be in the form of a JSON array:
//
// ["friends.0.first","friends.1.first","friends.2.first"]
// ["friends.0.first","friends.1.first","friends.2.first"]
//
// The param 'json' must be the original JSON used when calling Get.
//
@@ -3239,11 +3329,11 @@ func (t Result) Paths(json string) []string {
// Path returns the original GJSON path for a Result where the Result came
// from a simple path that returns a single value, like:
//
// gjson.Get(json, "friends.#(last=Murphy)")
// gjson.Get(json, "friends.#(last=Murphy)")
//
// The returned value will be in the form of a JSON string:
//
// "friends.0"
// "friends.0"
//
// The param 'json' must be the original JSON used when calling Get.
//
@@ -3357,3 +3447,30 @@ func escapeComp(comp string) string {
}
return comp
}
func parseRecursiveDescent(all []Result, parent Result, path string) []Result {
	if res := parent.Get(path); res.Exists() {
		all = append(all, res)
	}
	if parent.IsArray() || parent.IsObject() {
		parent.ForEach(func(_, val Result) bool {
			all = parseRecursiveDescent(all, val, path)
			return true
		})
	}
	return all
}

func modDig(json, arg string) string {
	all := parseRecursiveDescent(nil, Parse(json), arg)
	var out []byte
	out = append(out, '[')
	for i, res := range all {
		if i > 0 {
			out = append(out, ',')
		}
		out = append(out, res.Raw...)
	}
	out = append(out, ']')
	return string(out)
}

105
vendor/github.com/yuin/goldmark/.golangci.yml generated vendored Normal file
View File

@@ -0,0 +1,105 @@
run:
  deadline: 10m

issues:
  exclude-use-default: false
  exclude-rules:
    - path: _test.go
      linters:
        - errcheck
        - lll
  exclude:
    - "Package util"

linters:
  disable-all: true
  enable:
    - errcheck
    - gosimple
    - govet
    - ineffassign
    - staticcheck
    - typecheck
    - unused
    - gofmt
    - godot
    - makezero
    - misspell
    - revive
    - wastedassign
    - lll

linters-settings:
  revive:
    severity: "warning"
    confidence: 0.8
    rules:
      - name: blank-imports
        severity: warning
        disabled: false
      - name: context-as-argument
        severity: warning
        disabled: false
      - name: context-keys-type
        severity: warning
        disabled: false
      - name: dot-imports
        severity: warning
        disabled: false
      - name: error-return
        severity: warning
        disabled: false
      - name: error-strings
        severity: warning
        disabled: false
      - name: error-naming
        severity: warning
        disabled: false
      - name: exported
        severity: warning
        disabled: false
      - name: increment-decrement
        severity: warning
        disabled: false
      - name: var-naming
        severity: warning
        disabled: false
      - name: var-declaration
        severity: warning
        disabled: false
      - name: package-comments
        severity: warning
        disabled: false
      - name: range
        severity: warning
        disabled: false
      - name: receiver-naming
        severity: warning
        disabled: false
      - name: time-naming
        severity: warning
        disabled: false
      - name: unexported-return
        severity: warning
        disabled: false
      - name: indent-error-flow
        severity: warning
        disabled: false
      - name: errorf
        severity: warning
        disabled: false
      - name: empty-block
        severity: warning
        disabled: true
      - name: superfluous-else
        severity: warning
        disabled: false
      - name: unused-parameter
        severity: warning
        disabled: true
      - name: unreachable-code
        severity: warning
        disabled: false
      - name: redefines-builtin-id
        severity: warning
        disabled: false

View File

@@ -1,4 +1,7 @@
.PHONY: test fuzz
.PHONY: test fuzz lint
lint:
	golangci-lint run -c .golangci.yml ./...

test:
	go test -coverprofile=profile.out -coverpkg=github.com/yuin/goldmark,github.com/yuin/goldmark/ast,github.com/yuin/goldmark/extension,github.com/yuin/goldmark/extension/ast,github.com/yuin/goldmark/parser,github.com/yuin/goldmark/renderer,github.com/yuin/goldmark/renderer/html,github.com/yuin/goldmark/text,github.com/yuin/goldmark/util ./...

View File

@@ -440,6 +440,9 @@ Extensions
- [goldmark-pdf](https://github.com/stephenafamo/goldmark-pdf): A PDF renderer that can be passed to `goldmark.WithRenderer()`.
- [goldmark-hashtag](https://github.com/abhinav/goldmark-hashtag): Adds support for `#hashtag`-based tagging to goldmark.
- [goldmark-wikilink](https://github.com/abhinav/goldmark-wikilink): Adds support for `[[wiki]]`-style links to goldmark.
- [goldmark-anchor](https://github.com/abhinav/goldmark-anchor): Adds anchors (permalinks) next to all headers in a document.
- [goldmark-figure](https://github.com/mangoumbrella/goldmark-figure): Adds support for rendering paragraphs starting with an image to `<figure>` elements.
- [goldmark-frontmatter](https://github.com/abhinav/goldmark-frontmatter): Adds support for YAML, TOML, and custom front matter to documents.
- [goldmark-toc](https://github.com/abhinav/goldmark-toc): Adds support for generating tables-of-contents for goldmark documents.
- [goldmark-mermaid](https://github.com/abhinav/goldmark-mermaid): Adds support for rendering [Mermaid](https://mermaid-js.github.io/mermaid/) diagrams in goldmark documents.
- [goldmark-pikchr](https://github.com/jchenry/goldmark-pikchr): Adds support for rendering [Pikchr](https://pikchr.org/home/doc/trunk/homepage.md) diagrams in goldmark documents.
@@ -448,6 +451,7 @@ Extensions
- [goldmark-fences](https://github.com/stefanfritsch/goldmark-fences): Support for pandoc-style [fenced divs](https://pandoc.org/MANUAL.html#divs-and-spans) in goldmark.
- [goldmark-d2](https://github.com/FurqanSoftware/goldmark-d2): Adds support for [D2](https://d2lang.com/) diagrams.
- [goldmark-katex](https://github.com/FurqanSoftware/goldmark-katex): Adds support for [KaTeX](https://katex.org/) math and equations.
- [goldmark-img64](https://github.com/tenkoh/goldmark-img64): Adds support for embedding images into the document as DataURL (base64 encoded).
goldmark internal(for extension developers)

View File

@@ -39,7 +39,7 @@ func NewNodeKind(name string) NodeKind {
return kindMax
}
// An Attribute is an attribute of the Node
// An Attribute is an attribute of the Node.
type Attribute struct {
Name []byte
Value interface{}
@@ -248,7 +248,7 @@ func (n *BaseNode) RemoveChildren(self Node) {
n.childCount = 0
}
// SortChildren implements Node.SortChildren
// SortChildren implements Node.SortChildren.
func (n *BaseNode) SortChildren(comparator func(n1, n2 Node) int) {
var sorted Node
current := n.firstChild
@@ -358,7 +358,7 @@ func (n *BaseNode) InsertBefore(self, v1, insertee Node) {
}
}
// OwnerDocument implements Node.OwnerDocument
// OwnerDocument implements Node.OwnerDocument.
func (n *BaseNode) OwnerDocument() *Document {
d := n.Parent()
for {
@@ -399,7 +399,7 @@ func (n *BaseNode) SetAttribute(name []byte, value interface{}) {
n.attributes = append(n.attributes, Attribute{name, value})
}
// SetAttributeString implements Node.SetAttributeString
// SetAttributeString implements Node.SetAttributeString.
func (n *BaseNode) SetAttributeString(name string, value interface{}) {
n.SetAttribute(util.StringToReadOnlyBytes(name), value)
}
@@ -422,12 +422,12 @@ func (n *BaseNode) AttributeString(s string) (interface{}, bool) {
return n.Attribute(util.StringToReadOnlyBytes(s))
}
// Attributes implements Node.Attributes
// Attributes implements Node.Attributes.
func (n *BaseNode) Attributes() []Attribute {
return n.attributes
}
// RemoveAttributes implements Node.RemoveAttributes
// RemoveAttributes implements Node.RemoveAttributes.
func (n *BaseNode) RemoveAttributes() {
n.attributes = nil
}

View File

@@ -14,12 +14,12 @@ type BaseBlock struct {
lines *textm.Segments
}
// Type implements Node.Type
// Type implements Node.Type.
func (b *BaseBlock) Type() NodeType {
return TypeBlock
}
// IsRaw implements Node.IsRaw
// IsRaw implements Node.IsRaw.
func (b *BaseBlock) IsRaw() bool {
return false
}
@@ -34,7 +34,7 @@ func (b *BaseBlock) SetBlankPreviousLines(v bool) {
b.blankPreviousLines = v
}
// Lines implements Node.Lines
// Lines implements Node.Lines.
func (b *BaseBlock) Lines() *textm.Segments {
if b.lines == nil {
b.lines = textm.NewSegments()
@@ -42,7 +42,7 @@ func (b *BaseBlock) Lines() *textm.Segments {
return b.lines
}
// SetLines implements Node.SetLines
// SetLines implements Node.SetLines.
func (b *BaseBlock) SetLines(v *textm.Segments) {
b.lines = v
}
@@ -72,7 +72,7 @@ func (n *Document) Kind() NodeKind {
return KindDocument
}
// OwnerDocument implements Node.OwnerDocument
// OwnerDocument implements Node.OwnerDocument.
func (n *Document) OwnerDocument() *Document {
return n
}
@@ -431,19 +431,19 @@ func NewListItem(offset int) *ListItem {
type HTMLBlockType int
const (
// HTMLBlockType1 represents type 1 html blocks
// HTMLBlockType1 represents type 1 html blocks.
HTMLBlockType1 HTMLBlockType = iota + 1
// HTMLBlockType2 represents type 2 html blocks
// HTMLBlockType2 represents type 2 html blocks.
HTMLBlockType2
// HTMLBlockType3 represents type 3 html blocks
// HTMLBlockType3 represents type 3 html blocks.
HTMLBlockType3
// HTMLBlockType4 represents type 4 html blocks
// HTMLBlockType4 represents type 4 html blocks.
HTMLBlockType4
// HTMLBlockType5 represents type 5 html blocks
// HTMLBlockType5 represents type 5 html blocks.
HTMLBlockType5
// HTMLBlockType6 represents type 6 html blocks
// HTMLBlockType6 represents type 6 html blocks.
HTMLBlockType6
// HTMLBlockType7 represents type 7 html blocks
// HTMLBlockType7 represents type 7 html blocks.
HTMLBlockType7
)

View File

@@ -13,12 +13,12 @@ type BaseInline struct {
BaseNode
}
// Type implements Node.Type
// Type implements Node.Type.
func (b *BaseInline) Type() NodeType {
return TypeInline
}
// IsRaw implements Node.IsRaw
// IsRaw implements Node.IsRaw.
func (b *BaseInline) IsRaw() bool {
return false
}
@@ -33,12 +33,12 @@ func (b *BaseInline) SetBlankPreviousLines(v bool) {
panic("can not call with inline nodes.")
}
// Lines implements Node.Lines
// Lines implements Node.Lines.
func (b *BaseInline) Lines() *textm.Segments {
panic("can not call with inline nodes.")
}
// SetLines implements Node.SetLines
// SetLines implements Node.SetLines.
func (b *BaseInline) SetLines(v *textm.Segments) {
panic("can not call with inline nodes.")
}
@@ -132,7 +132,8 @@ func (n *Text) Merge(node Node, source []byte) bool {
if !ok {
return false
}
if n.Segment.Stop != t.Segment.Start || t.Segment.Padding != 0 || source[n.Segment.Stop-1] == '\n' || t.IsRaw() != n.IsRaw() {
if n.Segment.Stop != t.Segment.Start || t.Segment.Padding != 0 ||
source[n.Segment.Stop-1] == '\n' || t.IsRaw() != n.IsRaw() {
return false
}
n.Segment.Stop = t.Segment.Stop
@@ -214,7 +215,7 @@ func MergeOrReplaceTextSegment(parent Node, n Node, s textm.Segment) {
}
}
// A String struct is a textual content that has a concrete value
// A String struct is a textual content that has a concrete value.
type String struct {
BaseInline
@@ -305,7 +306,7 @@ func (n *CodeSpan) IsBlank(source []byte) bool {
return true
}
// Dump implements Node.Dump
// Dump implements Node.Dump.
func (n *CodeSpan) Dump(source []byte, level int) {
DumpHelper(n, source, level, nil, nil)
}
@@ -467,7 +468,7 @@ type AutoLink struct {
// Inline implements Inline.Inline.
func (n *AutoLink) Inline() {}
// Dump implements Node.Dump
// Dump implements Node.Dump.
func (n *AutoLink) Dump(source []byte, level int) {
segment := n.value.Segment
m := map[string]string{

View File

@@ -88,7 +88,7 @@ type Footnote struct {
func (n *Footnote) Dump(source []byte, level int) {
m := map[string]string{}
m["Index"] = fmt.Sprintf("%v", n.Index)
m["Ref"] = fmt.Sprintf("%s", n.Ref)
m["Ref"] = string(n.Ref)
gast.DumpHelper(n, source, level, m, nil)
}

View File

@@ -2,8 +2,9 @@ package ast
import (
"fmt"
gast "github.com/yuin/goldmark/ast"
"strings"
gast "github.com/yuin/goldmark/ast"
)
// Alignment is a text alignment of table cells.
@@ -45,7 +46,7 @@ type Table struct {
Alignments []Alignment
}
// Dump implements Node.Dump
// Dump implements Node.Dump.
func (n *Table) Dump(source []byte, level int) {
gast.DumpHelper(n, source, level, nil, func(level int) {
indent := strings.Repeat(" ", level)

View File

@@ -29,6 +29,7 @@ type cjk struct {
EscapedSpace bool
}
// CJK is a goldmark extension that provides functionalities for CJK languages.
var CJK = NewCJK(WithEastAsianLineBreaks(), WithEscapedSpace())
// NewCJK returns a new extension with given options.

View File

@@ -113,7 +113,8 @@ func (b *definitionDescriptionParser) Trigger() []byte {
return []byte{':'}
}
func (b *definitionDescriptionParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
func (b *definitionDescriptionParser) Open(
parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
line, _ := reader.PeekLine()
pos := pc.BlockOffset()
indent := pc.BlockIndent()
@@ -199,7 +200,8 @@ func (r *DefinitionListHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFunc
// DefinitionListAttributeFilter defines attribute names which dl elements can have.
var DefinitionListAttributeFilter = html.GlobalAttributeFilter
func (r *DefinitionListHTMLRenderer) renderDefinitionList(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *DefinitionListHTMLRenderer) renderDefinitionList(
w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
if n.Attributes() != nil {
_, _ = w.WriteString("<dl")
@@ -217,7 +219,8 @@ func (r *DefinitionListHTMLRenderer) renderDefinitionList(w util.BufWriter, sour
// DefinitionTermAttributeFilter defines attribute names which dd elements can have.
var DefinitionTermAttributeFilter = html.GlobalAttributeFilter
func (r *DefinitionListHTMLRenderer) renderDefinitionTerm(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *DefinitionListHTMLRenderer) renderDefinitionTerm(
w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
if n.Attributes() != nil {
_, _ = w.WriteString("<dt")
@@ -235,7 +238,8 @@ func (r *DefinitionListHTMLRenderer) renderDefinitionTerm(w util.BufWriter, sour
// DefinitionDescriptionAttributeFilter defines attribute names which dd elements can have.
var DefinitionDescriptionAttributeFilter = html.GlobalAttributeFilter
func (r *DefinitionListHTMLRenderer) renderDefinitionDescription(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *DefinitionListHTMLRenderer) renderDefinitionDescription(
w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
n := node.(*ast.DefinitionDescription)
_, _ = w.WriteString("<dd")

View File

@@ -44,8 +44,8 @@ func (b *footnoteBlockParser) Open(parent gast.Node, reader text.Reader, pc pars
return nil, parser.NoChildren
}
open := pos + 1
closes := 0
closure := util.FindClosure(line[pos+1:], '[', ']', false, false)
var closes int
closure := util.FindClosure(line[pos+1:], '[', ']', false, false) //nolint:staticcheck
closes = pos + 1 + closure
next := closes + 1
if closure > -1 {
@@ -136,7 +136,7 @@ func (s *footnoteParser) Parse(parent gast.Node, block text.Reader, pc parser.Co
return nil
}
open := pos
closure := util.FindClosure(line[pos:], '[', ']', false, false)
closure := util.FindClosure(line[pos:], '[', ']', false, false) //nolint:staticcheck
if closure < 0 {
return nil
}
@@ -156,7 +156,7 @@ func (s *footnoteParser) Parse(parent gast.Node, block text.Reader, pc parser.Co
d := def.(*ast.Footnote)
if bytes.Equal(d.Ref, value) {
if d.Index < 0 {
list.Count += 1
list.Count++
d.Index = list.Count
}
index = d.Index
@@ -272,9 +272,9 @@ func (a *footnoteASTTransformer) Transform(node *gast.Document, reader text.Read
// FootnoteConfig holds configuration values for the footnote extension.
//
// Link* and Backlink* configurations have some variables:
// Occurrances of “^^” in the string will be replaced by the
// Occurrences of “^^” in the string will be replaced by the
// corresponding footnote number in the HTML output.
// Occurrances of “%%” will be replaced by a number for the
// Occurrences of “%%” will be replaced by a number for the
// reference (footnotes can have multiple references).
type FootnoteConfig struct {
html.Config
@@ -525,7 +525,8 @@ func (r *FootnoteHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegist
reg.Register(ast.KindFootnoteList, r.renderFootnoteList)
}
func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *FootnoteHTMLRenderer) renderFootnoteLink(
w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
n := node.(*ast.FootnoteLink)
is := strconv.Itoa(n.Index)
@@ -556,7 +557,8 @@ func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byt
return gast.WalkContinue, nil
}
func (r *FootnoteHTMLRenderer) renderFootnoteBacklink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *FootnoteHTMLRenderer) renderFootnoteBacklink(
w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
n := node.(*ast.FootnoteBacklink)
is := strconv.Itoa(n.Index)
@@ -581,7 +583,8 @@ func (r *FootnoteHTMLRenderer) renderFootnoteBacklink(w util.BufWriter, source [
return gast.WalkContinue, nil
}
func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *FootnoteHTMLRenderer) renderFootnote(
w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
n := node.(*ast.Footnote)
is := strconv.Itoa(n.Index)
if entering {
@@ -600,7 +603,8 @@ func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, n
return gast.WalkContinue, nil
}
func (r *FootnoteHTMLRenderer) renderFootnoteList(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *FootnoteHTMLRenderer) renderFootnoteList(
w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
_, _ = w.WriteString(`<div class="footnotes" role="doc-endnotes"`)
if node.Attributes() != nil {

View File

@@ -11,9 +11,9 @@ import (
"github.com/yuin/goldmark/util"
)
var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-z]+(?:[/#?][-a-zA-Z0-9@:%_\+.~#!?&/=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-z]+(?:[/#?][-a-zA-Z0-9@:%_\+.~#!?&/=\(\);,'">\^{}\[\]` + "`" + `]*)?`) //nolint:golint,lll
var urlRegexp = regexp.MustCompile(`^(?:http|https|ftp)://[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-z]+(?::\d+)?(?:[/#?][-a-zA-Z0-9@:%_+.~#$!?&/=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
var urlRegexp = regexp.MustCompile(`^(?:http|https|ftp)://[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-z]+(?::\d+)?(?:[/#?][-a-zA-Z0-9@:%_+.~#$!?&/=\(\);,'">\^{}\[\]` + "`" + `]*)?`) //nolint:golint,lll
// An LinkifyConfig struct is a data structure that holds configuration of the
// Linkify extension.
@@ -92,9 +92,6 @@ func WithLinkifyURLRegexp(value *regexp.Regexp) LinkifyOption {
}
}
// WithLinkifyWWWRegexp is a functional option that specify
// a pattern of the URL without a protocol.
// This pattern must start with 'www.' .
type withLinkifyWWWRegexp struct {
value *regexp.Regexp
}
@@ -107,14 +104,15 @@ func (o *withLinkifyWWWRegexp) SetLinkifyOption(p *LinkifyConfig) {
p.WWWRegexp = o.value
}
// WithLinkifyWWWRegexp is a functional option that specify
// a pattern of the URL without a protocol.
// This pattern must start with 'www.' .
func WithLinkifyWWWRegexp(value *regexp.Regexp) LinkifyOption {
return &withLinkifyWWWRegexp{
value: value,
}
}
// WithLinkifyWWWRegexp is a functional otpion that specify
// a pattern of the email address.
type withLinkifyEmailRegexp struct {
value *regexp.Regexp
}
@@ -127,6 +125,8 @@ func (o *withLinkifyEmailRegexp) SetLinkifyOption(p *LinkifyConfig) {
p.EmailRegexp = o.value
}
// WithLinkifyEmailRegexp is a functional otpion that specify
// a pattern of the email address.
func WithLinkifyEmailRegexp(value *regexp.Regexp) LinkifyOption {
return &withLinkifyEmailRegexp{
value: value,
@@ -303,6 +303,8 @@ type linkify struct {
// Linkify is an extension that allow you to parse text that seems like a URL.
var Linkify = &linkify{}
// NewLinkify creates a new [goldmark.Extender] that
// allow you to parse text that seems like a URL.
func NewLinkify(opts ...LinkifyOption) goldmark.Extender {
return &linkify{
options: opts,

2
vendor/github.com/yuin/goldmark/extension/package.go generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Package extension is a collection of builtin extensions.
package extension

View File

@@ -85,7 +85,8 @@ func (r *StrikethroughHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncR
// StrikethroughAttributeFilter defines attribute names which dd elements can have.
var StrikethroughAttributeFilter = html.GlobalAttributeFilter
func (r *StrikethroughHTMLRenderer) renderStrikethrough(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *StrikethroughHTMLRenderer) renderStrikethrough(
w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
if n.Attributes() != nil {
_, _ = w.WriteString("<del")

View File

@@ -23,7 +23,7 @@ type escapedPipeCell struct {
Transformed bool
}
// TableCellAlignMethod indicates how are table cells aligned in HTML format.indicates how are table cells aligned in HTML format.
// TableCellAlignMethod indicates how are table cells aligned in HTML format.
type TableCellAlignMethod int
const (
@@ -181,7 +181,8 @@ func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, reader text.
}
}
func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []ast.Alignment, isHeader bool, reader text.Reader, pc parser.Context) *ast.TableRow {
func (b *tableParagraphTransformer) parseRow(segment text.Segment,
alignments []ast.Alignment, isHeader bool, reader text.Reader, pc parser.Context) *ast.TableRow {
source := reader.Source()
line := segment.Value(source)
pos := 0
@@ -369,7 +370,8 @@ var TableAttributeFilter = html.GlobalAttributeFilter.Extend(
[]byte("width"), // [Deprecated]
)
func (r *TableHTMLRenderer) renderTable(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *TableHTMLRenderer) renderTable(
w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
_, _ = w.WriteString("<table")
if n.Attributes() != nil {
@@ -391,7 +393,8 @@ var TableHeaderAttributeFilter = html.GlobalAttributeFilter.Extend(
[]byte("valign"), // [Deprecated since HTML4] [Obsolete since HTML5]
)
func (r *TableHTMLRenderer) renderTableHeader(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *TableHTMLRenderer) renderTableHeader(
w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
_, _ = w.WriteString("<thead")
if n.Attributes() != nil {
@@ -418,7 +421,8 @@ var TableRowAttributeFilter = html.GlobalAttributeFilter.Extend(
[]byte("valign"), // [Obsolete since HTML5]
)
func (r *TableHTMLRenderer) renderTableRow(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *TableHTMLRenderer) renderTableRow(
w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
if entering {
_, _ = w.WriteString("<tr")
if n.Attributes() != nil {
@@ -445,12 +449,14 @@ var TableThCellAttributeFilter = html.GlobalAttributeFilter.Extend(
[]byte("charoff"), // [Obsolete since HTML5]
[]byte("colspan"), // [OK] Number of columns that the cell is to span
[]byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
[]byte("headers"), // [OK] This attribute contains a list of space-separated
// strings, each corresponding to the id attribute of the <th> elements that apply to this element
[]byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
[]byte("rowspan"), // [OK] Number of rows that the cell is to span
[]byte("scope"), // [OK] This enumerated attribute defines the cells that the header (defined in the <th>) element relates to [NOT OK in <td>]
[]byte("scope"), // [OK] This enumerated attribute defines the cells that
// the header (defined in the <th>) element relates to [NOT OK in <td>]
[]byte("valign"), // [Obsolete since HTML5]
[]byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
@@ -466,7 +472,8 @@ var TableTdCellAttributeFilter = html.GlobalAttributeFilter.Extend(
[]byte("charoff"), // [Obsolete since HTML5]
[]byte("colspan"), // [OK] Number of columns that the cell is to span
[]byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
[]byte("headers"), // [OK] This attribute contains a list of space-separated
// strings, each corresponding to the id attribute of the <th> elements that apply to this element
[]byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
@@ -477,7 +484,8 @@ var TableTdCellAttributeFilter = html.GlobalAttributeFilter.Extend(
[]byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
)
func (r *TableHTMLRenderer) renderTableCell(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *TableHTMLRenderer) renderTableCell(
w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
n := node.(*ast.TableCell)
tag := "td"
if n.Parent().Kind() == ast.KindTableHeader {

View File

@@ -1,6 +1,8 @@
package extension
import (
"regexp"
"github.com/yuin/goldmark"
gast "github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/extension/ast"
@@ -9,7 +11,6 @@ import (
"github.com/yuin/goldmark/renderer/html"
"github.com/yuin/goldmark/text"
"github.com/yuin/goldmark/util"
"regexp"
)
var taskListRegexp = regexp.MustCompile(`^\[([\sxX])\]\s*`)
@@ -80,21 +81,22 @@ func (r *TaskCheckBoxHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRe
reg.Register(ast.KindTaskCheckBox, r.renderTaskCheckBox)
}
func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(
w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
if !entering {
return gast.WalkContinue, nil
}
n := node.(*ast.TaskCheckBox)
if n.IsChecked {
w.WriteString(`<input checked="" disabled="" type="checkbox"`)
_, _ = w.WriteString(`<input checked="" disabled="" type="checkbox"`)
} else {
w.WriteString(`<input disabled="" type="checkbox"`)
_, _ = w.WriteString(`<input disabled="" type="checkbox"`)
}
if r.XHTML {
w.WriteString(" /> ")
_, _ = w.WriteString(" /> ")
} else {
w.WriteString("> ")
_, _ = w.WriteString("> ")
}
return gast.WalkContinue, nil
}

View File

@@ -36,25 +36,25 @@ func getUnclosedCounter(pc parser.Context) *unclosedCounter {
type TypographicPunctuation int
const (
// LeftSingleQuote is '
// LeftSingleQuote is ' .
LeftSingleQuote TypographicPunctuation = iota + 1
// RightSingleQuote is '
// RightSingleQuote is ' .
RightSingleQuote
// LeftDoubleQuote is "
// LeftDoubleQuote is " .
LeftDoubleQuote
// RightDoubleQuote is "
// RightDoubleQuote is " .
RightDoubleQuote
// EnDash is --
// EnDash is -- .
EnDash
// EmDash is ---
// EmDash is --- .
EmDash
// Ellipsis is ...
// Ellipsis is ... .
Ellipsis
// LeftAngleQuote is <<
// LeftAngleQuote is << .
LeftAngleQuote
// RightAngleQuote is >>
// RightAngleQuote is >> .
RightAngleQuote
// Apostrophe is '
// Apostrophe is ' .
Apostrophe
typographicPunctuationMax
@@ -218,7 +218,8 @@ func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser
if c == '\'' {
if s.Substitutions[Apostrophe] != nil {
// Handle decade abbrevations such as '90s
if d.CanOpen && !d.CanClose && len(line) > 3 && util.IsNumeric(line[1]) && util.IsNumeric(line[2]) && line[3] == 's' {
if d.CanOpen && !d.CanClose && len(line) > 3 &&
util.IsNumeric(line[1]) && util.IsNumeric(line[2]) && line[3] == 's' {
after := rune(' ')
if len(line) > 4 {
after = util.ToRune(line, 4)
@@ -231,7 +232,8 @@ func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser
}
}
// special cases: 'twas, 'em, 'net
if len(line) > 1 && (unicode.IsPunct(before) || unicode.IsSpace(before)) && (line[1] == 't' || line[1] == 'e' || line[1] == 'n' || line[1] == 'l') {
if len(line) > 1 && (unicode.IsPunct(before) || unicode.IsSpace(before)) &&
(line[1] == 't' || line[1] == 'e' || line[1] == 'n' || line[1] == 'l') {
node := gast.NewString(s.Substitutions[Apostrophe])
node.SetCode(true)
block.Advance(1)
@@ -239,7 +241,8 @@ func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser
}
// Convert normal apostrophes. This is probably more flexible than necessary but
// converts any apostrophe in between two alphanumerics.
if len(line) > 1 && (unicode.IsDigit(before) || unicode.IsLetter(before)) && (unicode.IsLetter(util.ToRune(line, 1))) {
if len(line) > 1 && (unicode.IsDigit(before) || unicode.IsLetter(before)) &&
(unicode.IsLetter(util.ToRune(line, 1))) {
node := gast.NewString(s.Substitutions[Apostrophe])
node.SetCode(true)
block.Advance(1)
@@ -249,11 +252,14 @@ func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser
if s.Substitutions[LeftSingleQuote] != nil && d.CanOpen && !d.CanClose {
nt := LeftSingleQuote
// special cases: Alice's, I'm, Don't, You'd
if len(line) > 1 && (line[1] == 's' || line[1] == 'm' || line[1] == 't' || line[1] == 'd') && (len(line) < 3 || util.IsPunct(line[2]) || util.IsSpace(line[2])) {
if len(line) > 1 && (line[1] == 's' || line[1] == 'm' || line[1] == 't' || line[1] == 'd') &&
(len(line) < 3 || util.IsPunct(line[2]) || util.IsSpace(line[2])) {
nt = RightSingleQuote
}
// special cases: I've, I'll, You're
if len(line) > 2 && ((line[1] == 'v' && line[2] == 'e') || (line[1] == 'l' && line[2] == 'l') || (line[1] == 'r' && line[2] == 'e')) && (len(line) < 4 || util.IsPunct(line[3]) || util.IsSpace(line[3])) {
if len(line) > 2 && ((line[1] == 'v' && line[2] == 'e') ||
(line[1] == 'l' && line[2] == 'l') || (line[1] == 'r' && line[2] == 'e')) &&
(len(line) < 4 || util.IsPunct(line[3]) || util.IsSpace(line[3])) {
nt = RightSingleQuote
}
if nt == LeftSingleQuote {
@@ -266,8 +272,9 @@ func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser
return node
}
if s.Substitutions[RightSingleQuote] != nil {
// plural possesives and abbreviations: Smiths', doin'
if len(line) > 1 && unicode.IsSpace(util.ToRune(line, 0)) || unicode.IsPunct(util.ToRune(line, 0)) && (len(line) > 2 && !unicode.IsDigit(util.ToRune(line, 1))) {
// plural possesive and abbreviations: Smiths', doin'
if len(line) > 1 && unicode.IsSpace(util.ToRune(line, 0)) || unicode.IsPunct(util.ToRune(line, 0)) &&
(len(line) > 2 && !unicode.IsDigit(util.ToRune(line, 1))) {
node := gast.NewString(s.Substitutions[RightSingleQuote])
node.SetCode(true)
block.Advance(1)
@@ -276,7 +283,8 @@ func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser
}
if s.Substitutions[RightSingleQuote] != nil && counter.Single > 0 {
isClose := d.CanClose && !d.CanOpen
maybeClose := d.CanClose && d.CanOpen && len(line) > 1 && unicode.IsPunct(util.ToRune(line, 1)) && (len(line) == 2 || (len(line) > 2 && util.IsPunct(line[2]) || util.IsSpace(line[2])))
maybeClose := d.CanClose && d.CanOpen && len(line) > 1 && unicode.IsPunct(util.ToRune(line, 1)) &&
(len(line) == 2 || (len(line) > 2 && util.IsPunct(line[2]) || util.IsSpace(line[2])))
if isClose || maybeClose {
node := gast.NewString(s.Substitutions[RightSingleQuote])
node.SetCode(true)
@@ -296,7 +304,8 @@ func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser
}
if s.Substitutions[RightDoubleQuote] != nil && counter.Double > 0 {
isClose := d.CanClose && !d.CanOpen
maybeClose := d.CanClose && d.CanOpen && len(line) > 1 && (unicode.IsPunct(util.ToRune(line, 1))) && (len(line) == 2 || (len(line) > 2 && util.IsPunct(line[2]) || util.IsSpace(line[2])))
maybeClose := d.CanClose && d.CanOpen && len(line) > 1 && (unicode.IsPunct(util.ToRune(line, 1))) &&
(len(line) == 2 || (len(line) > 2 && util.IsPunct(line[2]) || util.IsSpace(line[2])))
if isClose || maybeClose {
// special case: "Monitor 21""
if len(line) > 1 && line[1] == '"' && unicode.IsDigit(before) {

View File

@@ -12,7 +12,7 @@ import (
var attrNameID = []byte("id")
var attrNameClass = []byte("class")
// An Attribute is an attribute of the markdown elements
// An Attribute is an attribute of the markdown elements.
type Attribute struct {
Name []byte
Value interface{}
@@ -93,7 +93,8 @@ func parseAttribute(reader text.Reader) (Attribute, bool) {
// CommonMark is basically defined for XHTML(even though it is legacy).
// So we restrict id characters.
for ; i < len(line) && !util.IsSpace(line[i]) &&
(!util.IsPunct(line[i]) || line[i] == '_' || line[i] == '-' || line[i] == ':' || line[i] == '.'); i++ {
(!util.IsPunct(line[i]) || line[i] == '_' ||
line[i] == '-' || line[i] == ':' || line[i] == '.'); i++ {
}
name := attrNameClass
if c == '#' {
@@ -145,7 +146,7 @@ func parseAttributeValue(reader text.Reader) (interface{}, bool) {
reader.SkipSpaces()
c := reader.Peek()
var value interface{}
ok := false
var ok bool
switch c {
case text.EOF:
return Attribute{}, false
@@ -244,7 +245,7 @@ func scanAttributeDecimal(reader text.Reader, w io.ByteWriter) {
for {
c := reader.Peek()
if util.IsNumeric(c) {
w.WriteByte(c)
_ = w.WriteByte(c)
} else {
return
}
@@ -286,7 +287,7 @@ func parseAttributeNumber(reader text.Reader) (float64, bool) {
}
scanAttributeDecimal(reader, &buf)
}
f, err := strconv.ParseFloat(buf.String(), 10)
f, err := strconv.ParseFloat(buf.String(), 64)
if err != nil {
return 0, false
}

View File

@@ -13,7 +13,7 @@ type HeadingConfig struct {
}
// SetOption implements SetOptioner.
func (b *HeadingConfig) SetOption(name OptionName, value interface{}) {
func (b *HeadingConfig) SetOption(name OptionName, _ interface{}) {
switch name {
case optAutoHeadingID:
b.AutoHeadingID = true
@@ -135,7 +135,9 @@ func (b *atxHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context)
for _, attr := range attrs {
node.SetAttribute(attr.Name, attr.Value)
}
node.Lines().Append(text.NewSegment(segment.Start+start+1-segment.Padding, segment.Start+closureOpen-segment.Padding))
node.Lines().Append(text.NewSegment(
segment.Start+start+1-segment.Padding,
segment.Start+closureOpen-segment.Padding))
}
}
}

View File

@@ -66,12 +66,12 @@ func (d *Delimiter) Dump(source []byte, level int) {
var kindDelimiter = ast.NewNodeKind("Delimiter")
// Kind implements Node.Kind
// Kind implements Node.Kind.
func (d *Delimiter) Kind() ast.NodeKind {
return kindDelimiter
}
// Text implements Node.Text
// Text implements Node.Text.
func (d *Delimiter) Text(source []byte) []byte {
return d.Segment.Value(source)
}
@@ -126,7 +126,7 @@ func ScanDelimiter(line []byte, before rune, min int, processor DelimiterProcess
after = util.ToRune(line, j)
}
canOpen, canClose := false, false
var canOpen, canClose bool
beforeIsPunctuation := util.IsPunctRune(before)
beforeIsWhitespace := util.IsSpaceRune(before)
afterIsPunctuation := util.IsPunctRune(after)

View File

@@ -76,7 +76,7 @@ var allowedBlockTags = map[string]bool{
"ul": true,
}
var htmlBlockType1OpenRegexp = regexp.MustCompile(`(?i)^[ ]{0,3}<(script|pre|style|textarea)(?:\s.*|>.*|/>.*|)(?:\r\n|\n)?$`)
var htmlBlockType1OpenRegexp = regexp.MustCompile(`(?i)^[ ]{0,3}<(script|pre|style|textarea)(?:\s.*|>.*|/>.*|)(?:\r\n|\n)?$`) //nolint:golint,lll
var htmlBlockType1CloseRegexp = regexp.MustCompile(`(?i)^.*</(?:script|pre|style|textarea)>.*`)
var htmlBlockType2OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<!\-\-`)
@@ -91,9 +91,9 @@ var htmlBlockType4Close = []byte{'>'}
var htmlBlockType5OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\!\[CDATA\[`)
var htmlBlockType5Close = []byte{']', ']', '>'}
var htmlBlockType6Regexp = regexp.MustCompile(`^[ ]{0,3}<(?:/[ ]*)?([a-zA-Z]+[a-zA-Z0-9\-]*)(?:[ ].*|>.*|/>.*|)(?:\r\n|\n)?$`)
var htmlBlockType6Regexp = regexp.MustCompile(`^[ ]{0,3}<(?:/[ ]*)?([a-zA-Z]+[a-zA-Z0-9\-]*)(?:[ ].*|>.*|/>.*|)(?:\r\n|\n)?$`) //nolint:golint,lll
var htmlBlockType7Regexp = regexp.MustCompile(`^[ ]{0,3}<(/[ ]*)?([a-zA-Z]+[a-zA-Z0-9\-]*)(` + attributePattern + `*)[ ]*(?:>|/>)[ ]*(?:\r\n|\n)?$`)
var htmlBlockType7Regexp = regexp.MustCompile(`^[ ]{0,3}<(/[ ]*)?([a-zA-Z]+[a-zA-Z0-9\-]*)(` + attributePattern + `*)[ ]*(?:>|/>)[ ]*(?:\r\n|\n)?$`) //nolint:golint,lll
type htmlBlockParser struct {
}
@@ -135,7 +135,8 @@ func (b *htmlBlockParser) Open(parent ast.Node, reader text.Reader, pc Context)
_, ok := allowedBlockTags[tagName]
if ok {
node = ast.NewHTMLBlock(ast.HTMLBlockType6)
} else if tagName != "script" && tagName != "style" && tagName != "pre" && !ast.IsParagraph(last) && !(isCloseTag && hasAttr) { // type 7 can not interrupt paragraph
} else if tagName != "script" && tagName != "style" &&
tagName != "pre" && !ast.IsParagraph(last) && !(isCloseTag && hasAttr) { // type 7 can not interrupt paragraph
node = ast.NewHTMLBlock(ast.HTMLBlockType7)
}
}

View File

@@ -250,7 +250,8 @@ var linkFindClosureOptions text.FindClosureOptions = text.FindClosureOptions{
Advance: true,
}
func (s *linkParser) parseReferenceLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) (*ast.Link, bool) {
func (s *linkParser) parseReferenceLink(parent ast.Node, last *linkLabelState,
block text.Reader, pc Context) (*ast.Link, bool) {
_, orgpos := block.Position()
block.Advance(1) // skip '['
segments, found := block.FindClosure('[', ']', linkFindClosureOptions)

View File

@@ -22,7 +22,7 @@ var listItemFlagValue interface{} = true
// Same as
// `^(([ ]*)([\-\*\+]))(\s+.*)?\n?$`.FindSubmatchIndex or
// `^(([ ]*)(\d{1,9}[\.\)]))(\s+.*)?\n?$`.FindSubmatchIndex
// `^(([ ]*)(\d{1,9}[\.\)]))(\s+.*)?\n?$`.FindSubmatchIndex.
func parseListItem(line []byte) ([6]int, listItemType) {
i := 0
l := len(line)
@@ -89,7 +89,7 @@ func matchesListItem(source []byte, strict bool) ([6]int, listItemType) {
}
func calcListOffset(source []byte, match [6]int) int {
offset := 0
var offset int
if match[4] < 0 || util.IsBlank(source[match[4]:]) { // list item starts with a blank line
offset = 1
} else {
@@ -250,14 +250,14 @@ func (b *listParser) Close(node ast.Node, reader text.Reader, pc Context) {
for c := node.FirstChild(); c != nil && list.IsTight; c = c.NextSibling() {
if c.FirstChild() != nil && c.FirstChild() != c.LastChild() {
for c1 := c.FirstChild().NextSibling(); c1 != nil; c1 = c1.NextSibling() {
if bl, ok := c1.(ast.Node); ok && bl.HasBlankPreviousLines() {
if c1.HasBlankPreviousLines() {
list.IsTight = false
break
}
}
}
if c != node.FirstChild() {
if bl, ok := c.(ast.Node); ok && bl.HasBlankPreviousLines() {
if c.HasBlankPreviousLines() {
list.IsTight = false
}
}
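The simplification in this hunk removes a type assertion from `ast.Node` back to `ast.Node`, which always succeeds for a non-nil value, so calling the method directly is equivalent. A toy reproduction with invented types:

```go
package main

import "fmt"

// node stands in for ast.Node in this illustration.
type node interface{ HasBlankPreviousLines() bool }

type item struct{ blank bool }

func (i item) HasBlankPreviousLines() bool { return i.blank }

func main() {
	var c node = item{blank: true}

	// Old form: asserting an interface value to its own interface type
	// always succeeds for a non-nil value, so ok is never false here.
	if bl, ok := c.(node); ok && bl.HasBlankPreviousLines() {
		fmt.Println("old form fires")
	}

	// New form: call the method directly.
	if c.HasBlankPreviousLines() {
		fmt.Println("new form fires")
	}
}
```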

View File

@@ -403,7 +403,8 @@ func (p *parseContext) IsInLinkLabel() bool {
type State int
const (
none State = 1 << iota
// None is a default value of the [State].
None State = 1 << iota
// Continue indicates parser can continue parsing.
Continue
@@ -1049,7 +1050,7 @@ func isBlankLine(lineNum, level int, stats []lineStat) bool {
func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
pc.SetOpenedBlocks([]Block{})
blankLines := make([]lineStat, 0, 128)
isBlank := false
var isBlank bool
for { // process blocks separated by blank lines
_, lines, ok := reader.SkipBlankLines()
if !ok {
@@ -1152,18 +1153,23 @@ func (p *parser) parseBlock(block text.BlockReader, parent ast.Node, pc Context)
break
}
lineLength := len(line)
var lineBreakFlags uint8 = 0
var lineBreakFlags uint8
hasNewLine := line[lineLength-1] == '\n'
if ((lineLength >= 3 && line[lineLength-2] == '\\' && line[lineLength-3] != '\\') || (lineLength == 2 && line[lineLength-2] == '\\')) && hasNewLine { // ends with \\n
if ((lineLength >= 3 && line[lineLength-2] == '\\' &&
line[lineLength-3] != '\\') || (lineLength == 2 && line[lineLength-2] == '\\')) && hasNewLine { // ends with \\n
lineLength -= 2
lineBreakFlags |= lineBreakHard | lineBreakVisible
} else if ((lineLength >= 4 && line[lineLength-3] == '\\' && line[lineLength-2] == '\r' && line[lineLength-4] != '\\') || (lineLength == 3 && line[lineLength-3] == '\\' && line[lineLength-2] == '\r')) && hasNewLine { // ends with \\r\n
} else if ((lineLength >= 4 && line[lineLength-3] == '\\' && line[lineLength-2] == '\r' &&
line[lineLength-4] != '\\') || (lineLength == 3 && line[lineLength-3] == '\\' && line[lineLength-2] == '\r')) &&
hasNewLine { // ends with \\r\n
lineLength -= 3
lineBreakFlags |= lineBreakHard | lineBreakVisible
} else if lineLength >= 3 && line[lineLength-3] == ' ' && line[lineLength-2] == ' ' && hasNewLine { // ends with [space][space]\n
} else if lineLength >= 3 && line[lineLength-3] == ' ' && line[lineLength-2] == ' ' &&
hasNewLine { // ends with [space][space]\n
lineLength -= 3
lineBreakFlags |= lineBreakHard
} else if lineLength >= 4 && line[lineLength-4] == ' ' && line[lineLength-3] == ' ' && line[lineLength-2] == '\r' && hasNewLine { // ends with [space][space]\r\n
} else if lineLength >= 4 && line[lineLength-4] == ' ' && line[lineLength-3] == ' ' &&
line[lineLength-2] == '\r' && hasNewLine { // ends with [space][space]\r\n
lineLength -= 4
lineBreakFlags |= lineBreakHard
} else if hasNewLine {

View File

@@ -15,7 +15,7 @@ type rawHTMLParser struct {
var defaultRawHTMLParser = &rawHTMLParser{}
// NewRawHTMLParser return a new InlineParser that can parse
// inline htmls
// inline htmls.
func NewRawHTMLParser() InlineParser {
return defaultRawHTMLParser
}
@@ -48,10 +48,10 @@ func (s *rawHTMLParser) Parse(parent ast.Node, block text.Reader, pc Context) as
}
var tagnamePattern = `([A-Za-z][A-Za-z0-9-]*)`
var attributePattern = `(?:[\r\n \t]+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:[\r\n \t]*=[\r\n \t]*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)`
var openTagRegexp = regexp.MustCompile("^<" + tagnamePattern + attributePattern + `*[ \t]*/?>`)
var closeTagRegexp = regexp.MustCompile("^</" + tagnamePattern + `\s*>`)
var spaceOrOneNewline = `(?:[ \t]|(?:\r\n|\n){0,1})`
var attributePattern = `(?:[\r\n \t]+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:[\r\n \t]*=[\r\n \t]*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)` //nolint:golint,lll
var openTagRegexp = regexp.MustCompile("^<" + tagnamePattern + attributePattern + `*` + spaceOrOneNewline + `*/?>`)
var closeTagRegexp = regexp.MustCompile("^</" + tagnamePattern + spaceOrOneNewline + `*>`)
var openProcessingInstruction = []byte("<?")
var closeProcessingInstruction = []byte("?>")
@@ -153,9 +153,8 @@ func (s *rawHTMLParser) parseMultiLineRegexp(reg *regexp.Regexp, block text.Read
if l == eline {
block.Advance(end - start)
break
} else {
block.AdvanceLine()
}
block.AdvanceLine()
}
return node
}

View File

@@ -91,7 +91,7 @@ func (b *setextHeadingParser) Close(node ast.Node, reader text.Reader, pc Contex
para.Lines().Append(segment)
heading.Parent().InsertAfter(heading.Parent(), heading, para)
} else {
next.(ast.Node).Lines().Unshift(segment)
next.Lines().Unshift(segment)
}
heading.Parent().RemoveChild(heading.Parent(), heading)
} else {

View File

@@ -1,3 +1,4 @@
// Package html implements renderer that outputs HTMLs.
package html
import (
@@ -253,15 +254,17 @@ var GlobalAttributeFilter = util.NewBytesFilter(
[]byte("translate"),
)
func (r *Renderer) renderDocument(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderDocument(
w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
// nothing to do
return ast.WalkContinue, nil
}
// HeadingAttributeFilter defines attribute names which heading elements can have
// HeadingAttributeFilter defines attribute names which heading elements can have.
var HeadingAttributeFilter = GlobalAttributeFilter
func (r *Renderer) renderHeading(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderHeading(
w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
n := node.(*ast.Heading)
if entering {
_, _ = w.WriteString("<h")
@@ -278,12 +281,13 @@ func (r *Renderer) renderHeading(w util.BufWriter, source []byte, node ast.Node,
return ast.WalkContinue, nil
}
// BlockquoteAttributeFilter defines attribute names which blockquote elements can have
// BlockquoteAttributeFilter defines attribute names which blockquote elements can have.
var BlockquoteAttributeFilter = GlobalAttributeFilter.Extend(
[]byte("cite"),
)
func (r *Renderer) renderBlockquote(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderBlockquote(
w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
if entering {
if n.Attributes() != nil {
_, _ = w.WriteString("<blockquote")
@@ -308,7 +312,8 @@ func (r *Renderer) renderCodeBlock(w util.BufWriter, source []byte, n ast.Node,
return ast.WalkContinue, nil
}
func (r *Renderer) renderFencedCodeBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderFencedCodeBlock(
w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
n := node.(*ast.FencedCodeBlock)
if entering {
_, _ = w.WriteString("<pre><code")
@@ -326,7 +331,8 @@ func (r *Renderer) renderFencedCodeBlock(w util.BufWriter, source []byte, node a
return ast.WalkContinue, nil
}
func (r *Renderer) renderHTMLBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderHTMLBlock(
w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
n := node.(*ast.HTMLBlock)
if entering {
if r.Unsafe {
@@ -428,7 +434,7 @@ func (r *Renderer) renderParagraph(w util.BufWriter, source []byte, n ast.Node,
func (r *Renderer) renderTextBlock(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
if !entering {
if _, ok := n.NextSibling().(ast.Node); ok && n.FirstChild() != nil {
if n.NextSibling() != nil && n.FirstChild() != nil {
_ = w.WriteByte('\n')
}
}
@@ -444,7 +450,8 @@ var ThematicAttributeFilter = GlobalAttributeFilter.Extend(
[]byte("width"), // [Deprecated]
)
func (r *Renderer) renderThematicBreak(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderThematicBreak(
w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
if !entering {
return ast.WalkContinue, nil
}
@@ -473,7 +480,8 @@ var LinkAttributeFilter = GlobalAttributeFilter.Extend(
[]byte("target"),
)
func (r *Renderer) renderAutoLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderAutoLink(
w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
n := node.(*ast.AutoLink)
if !entering {
return ast.WalkContinue, nil
@@ -528,7 +536,8 @@ func (r *Renderer) renderCodeSpan(w util.BufWriter, source []byte, n ast.Node, e
// EmphasisAttributeFilter defines attribute names which emphasis elements can have.
var EmphasisAttributeFilter = GlobalAttributeFilter
func (r *Renderer) renderEmphasis(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderEmphasis(
w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
n := node.(*ast.Emphasis)
tag := "em"
if n.Level == 2 {
@@ -618,7 +627,8 @@ func (r *Renderer) renderImage(w util.BufWriter, source []byte, node ast.Node, e
return ast.WalkSkipChildren, nil
}
func (r *Renderer) renderRawHTML(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
func (r *Renderer) renderRawHTML(
w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
if !entering {
return ast.WalkSkipChildren, nil
}
@@ -901,20 +911,24 @@ var bVb = []byte("vbscript:")
var bFile = []byte("file:")
var bData = []byte("data:")
func hasPrefix(s, prefix []byte) bool {
return len(s) >= len(prefix) && bytes.Equal(bytes.ToLower(s[0:len(prefix)]), bytes.ToLower(prefix))
}
// IsDangerousURL returns true if the given url seems a potentially dangerous url,
// otherwise false.
func IsDangerousURL(url []byte) bool {
if bytes.HasPrefix(url, bDataImage) && len(url) >= 11 {
if hasPrefix(url, bDataImage) && len(url) >= 11 {
v := url[11:]
if bytes.HasPrefix(v, bPng) || bytes.HasPrefix(v, bGif) ||
bytes.HasPrefix(v, bJpeg) || bytes.HasPrefix(v, bWebp) ||
bytes.HasPrefix(v, bSvg) {
if hasPrefix(v, bPng) || hasPrefix(v, bGif) ||
hasPrefix(v, bJpeg) || hasPrefix(v, bWebp) ||
hasPrefix(v, bSvg) {
return false
}
return true
}
return bytes.HasPrefix(url, bJs) || bytes.HasPrefix(url, bVb) ||
bytes.HasPrefix(url, bFile) || bytes.HasPrefix(url, bData)
return hasPrefix(url, bJs) || hasPrefix(url, bVb) ||
hasPrefix(url, bFile) || hasPrefix(url, bData)
}
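The new `hasPrefix` helper makes the scheme check case-insensitive, so URLs such as `JavaScript:alert(1)` are still flagged even though `bytes.HasPrefix` alone would miss them. A minimal sketch of the difference (the sample URL is invented for the example):

```go
package main

import (
	"bytes"
	"fmt"
)

// hasPrefix mirrors the helper introduced above: a case-insensitive
// prefix test built on the case-sensitive bytes primitives.
func hasPrefix(s, prefix []byte) bool {
	return len(s) >= len(prefix) &&
		bytes.Equal(bytes.ToLower(s[0:len(prefix)]), bytes.ToLower(prefix))
}

func main() {
	url := []byte("JavaScript:alert(1)")
	fmt.Println(bytes.HasPrefix(url, []byte("javascript:"))) // false
	fmt.Println(hasPrefix(url, []byte("javascript:")))       // true
}
```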
func nodeToHTMLText(n ast.Node, source []byte) []byte {

View File

@@ -16,7 +16,7 @@ type Config struct {
NodeRenderers util.PrioritizedSlice
}
// NewConfig returns a new Config
// NewConfig returns a new Config.
func NewConfig() *Config {
return &Config{
Options: map[OptionName]interface{}{},
@@ -78,7 +78,7 @@ type NodeRenderer interface {
RegisterFuncs(NodeRendererFuncRegisterer)
}
// A NodeRendererFuncRegisterer registers
// A NodeRendererFuncRegisterer registers given NodeRendererFunc to this object.
type NodeRendererFuncRegisterer interface {
// Register registers given NodeRendererFunc to this object.
Register(ast.NodeKind, NodeRendererFunc)

vendor/github.com/yuin/goldmark/text/package.go generated vendored Normal file
View File

@@ -0,0 +1,2 @@
// Package text provides functionalities to manipulate texts.
package text

View File

@@ -1,6 +1,7 @@
package text
import (
"bytes"
"io"
"regexp"
"unicode/utf8"
@@ -75,7 +76,7 @@ type Reader interface {
FindClosure(opener, closer byte, options FindClosureOptions) (*Segments, bool)
}
// FindClosureOptions is options for Reader.FindClosure
// FindClosureOptions is options for Reader.FindClosure.
type FindClosureOptions struct {
// CodeSpan is a flag for the FindClosure. If this is set to true,
// FindClosure ignores closers in codespans.
@@ -153,7 +154,7 @@ func (r *reader) PeekLine() ([]byte, Segment) {
return nil, r.pos
}
// io.RuneReader interface
// io.RuneReader interface.
func (r *reader) ReadRune() (rune, int, error) {
return readRuneReader(r)
}
@@ -353,7 +354,7 @@ func (r *blockReader) Value(seg Segment) []byte {
return ret
}
// io.RuneReader interface
// io.RuneReader interface.
func (r *blockReader) ReadRune() (rune, int, error) {
return readRuneReader(r)
}
@@ -537,24 +538,30 @@ func matchReader(r Reader, reg *regexp.Regexp) bool {
}
func findSubMatchReader(r Reader, reg *regexp.Regexp) [][]byte {
oldline, oldseg := r.Position()
oldLine, oldSeg := r.Position()
match := reg.FindReaderSubmatchIndex(r)
r.SetPosition(oldline, oldseg)
r.SetPosition(oldLine, oldSeg)
if match == nil {
return nil
}
runes := make([]rune, 0, match[1]-match[0])
var bb bytes.Buffer
bb.Grow(match[1] - match[0])
for i := 0; i < match[1]; {
r, size, _ := readRuneReader(r)
i += size
runes = append(runes, r)
bb.WriteRune(r)
}
result := [][]byte{}
bs := bb.Bytes()
var result [][]byte
for i := 0; i < len(match); i += 2 {
result = append(result, []byte(string(runes[match[i]:match[i+1]])))
if match[i] < 0 {
result = append(result, []byte{})
continue
}
result = append(result, bs[match[i]:match[i+1]])
}
r.SetPosition(oldline, oldseg)
r.SetPosition(oldLine, oldSeg)
r.Advance(match[1] - match[0])
return result
}
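The rewrite above collects the matched text into a `bytes.Buffer` and slices it per group instead of re-encoding a rune slice, and it now tolerates groups that did not participate in the match, which the regexp package reports as -1 offsets. A small standalone illustration of that -1 convention (pattern invented for the example):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Groups that did not take part in the match are reported with -1
	// offsets; mapping those to empty byte slices avoids slicing with a
	// negative index.
	re := regexp.MustCompile(`a(x)?(b)`)
	idx := re.FindReaderSubmatchIndex(strings.NewReader("ab"))
	fmt.Println(idx) // [0 2 -1 -1 1 2]
}
```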

View File

@@ -1,3 +1,4 @@
//nolint:golint,lll,misspell
package util
// An HTML5Entity struct represents HTML5 entities.
@@ -8,7 +9,7 @@ type HTML5Entity struct {
}
// LookUpHTML5EntityByName returns (an HTML5Entity, true) if an entity named
// given name is found, otherwise (nil, false)
// given name is found, otherwise (nil, false).
func LookUpHTML5EntityByName(name string) (*HTML5Entity, bool) {
v, ok := html5entities[name]
return v, ok

File diff suppressed because it is too large

View File

@@ -63,12 +63,13 @@ func (b *CopyOnWriteBuffer) AppendString(value string) {
// WriteByte writes the given byte to the buffer.
// WriteByte allocate new buffer and clears it at the first time.
func (b *CopyOnWriteBuffer) WriteByte(c byte) {
func (b *CopyOnWriteBuffer) WriteByte(c byte) error {
if !b.copied {
b.buffer = make([]byte, 0, len(b.buffer)+20)
b.copied = true
}
b.buffer = append(b.buffer, c)
return nil
}
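Returning an error from `WriteByte` lines the method up with the standard `io.ByteWriter` interface; callers that don't care simply discard the always-nil error with `_ =`, as the other hunks in this file do. A minimal sketch with a stand-in type:

```go
package main

import (
	"fmt"
	"io"
)

// toyBuffer mimics the new shape of the WriteByte method:
// returning an error makes it match io.ByteWriter.
type toyBuffer struct{ buf []byte }

func (b *toyBuffer) WriteByte(c byte) error {
	b.buf = append(b.buf, c)
	return nil
}

// Compile-time check: the error-returning signature satisfies the
// standard interface; the old void signature would not.
var _ io.ByteWriter = (*toyBuffer)(nil)

func main() {
	var b toyBuffer
	_ = b.WriteByte('x')       // discard the always-nil error
	fmt.Println(string(b.buf)) // x
}
```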
// AppendByte appends given bytes to the buffer.
@@ -145,12 +146,12 @@ func TabWidth(currentPos int) int {
// If the line contains tab characters, paddings may be not zero.
// currentPos==0 and width==2:
//
// position: 0 1
// [TAB]aaaa
// width: 1234 5678
// position: 0 1
// [TAB]aaaa
// width: 1234 5678
//
// width=2 is in the tab character. In this case, IndentPosition returns
// (pos=1, padding=2)
// (pos=1, padding=2).
func IndentPosition(bs []byte, currentPos, width int) (pos, padding int) {
return IndentPositionPadding(bs, currentPos, 0, width)
}
@@ -424,7 +425,7 @@ func DoFullUnicodeCaseFolding(v []byte) []byte {
if c >= 0x41 && c <= 0x5a {
// A-Z to a-z
cob.Write(v[n:i])
cob.WriteByte(c + 32)
_ = cob.WriteByte(c + 32)
n = i + 1
}
continue
@@ -521,7 +522,7 @@ func ToLinkReference(v []byte) string {
return string(ReplaceSpaces(v, ' '))
}
var htmlEscapeTable = [256][]byte{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&quot;"), nil, nil, nil, []byte("&amp;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&lt;"), nil, []byte("&gt;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}
var htmlEscapeTable = [256][]byte{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&quot;"), nil, nil, nil, []byte("&amp;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []byte("&lt;"), nil, []byte("&gt;"), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} //nolint:golint,lll
// EscapeHTMLByte returns HTML escaped bytes if the given byte should be escaped,
// otherwise nil.
@@ -557,7 +558,7 @@ func UnescapePunctuations(source []byte) []byte {
c := source[i]
if i < limit-1 && c == '\\' && IsPunct(source[i+1]) {
cob.Write(source[n:i])
cob.WriteByte(source[i+1])
_ = cob.WriteByte(source[i+1])
i += 2
n = i
continue
@@ -573,9 +574,9 @@ func UnescapePunctuations(source []byte) []byte {
// ResolveNumericReferences resolve numeric references like '&#1234;" .
func ResolveNumericReferences(source []byte) []byte {
cob := NewCopyOnWriteBuffer(source)
buf := make([]byte, 6, 6)
buf := make([]byte, 6)
limit := len(source)
ok := false
var ok bool
n := 0
for i := 0; i < limit; i++ {
if source[i] == '&' {
@@ -625,7 +626,7 @@ func ResolveNumericReferences(source []byte) []byte {
func ResolveEntityNames(source []byte) []byte {
cob := NewCopyOnWriteBuffer(source)
limit := len(source)
ok := false
var ok bool
n := 0
for i := 0; i < limit; i++ {
if source[i] == '&' {
@@ -658,9 +659,9 @@ var htmlSpace = []byte("%20")
// URLEscape escape the given URL.
// If resolveReference is set true:
// 1. unescape punctuations
// 2. resolve numeric references
// 3. resolve entity references
// 1. unescape punctuations
// 2. resolve numeric references
// 3. resolve entity references
//
// URL encoded values (%xx) are kept as is.
func URLEscape(v []byte, resolveReference bool) []byte {
@@ -750,7 +751,7 @@ func FindURLIndex(b []byte) int {
return i
}
var emailDomainRegexp = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*`)
var emailDomainRegexp = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*`) //nolint:golint,lll
// FindEmailIndex returns a stop index value if the given bytes seem an email address.
func FindEmailIndex(b []byte) int {
@@ -781,18 +782,19 @@ func FindEmailIndex(b []byte) int {
var spaces = []byte(" \t\n\x0b\x0c\x0d")
var spaceTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
var spaceTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} //nolint:golint,lll
var punctTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
var punctTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} //nolint:golint,lll
// a-zA-Z0-9, ;/?:@&=+$,-_.!~*'()#
var urlEscapeTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
var utf8lenTable = [256]int8{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 99, 99, 99, 99, 99, 99, 99, 99}
var urlEscapeTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} //nolint:golint,lll
var urlTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 1, 0, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
var utf8lenTable = [256]int8{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 99, 99, 99, 99, 99, 99, 99, 99} //nolint:golint,lll
var emailTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
var urlTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 1, 0, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} //nolint:golint,lll
var emailTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} //nolint:golint,lll
// UTF8Len returns a byte length of the utf-8 character.
func UTF8Len(b byte) int8 {
@@ -836,11 +838,18 @@ func IsAlphaNumeric(c byte) bool {
// IsEastAsianWideRune returns true if the given rune is an east asian wide character, otherwise false.
func IsEastAsianWideRune(r rune) bool {
// https://en.wikipedia.org/wiki/CJK_Symbols_and_Punctuation
var CJKSymbolsAndPunctuation = &unicode.RangeTable{
R16: []unicode.Range16{
{0x3000, 0x303F, 1},
},
}
return unicode.Is(unicode.Hiragana, r) ||
unicode.Is(unicode.Katakana, r) ||
unicode.Is(unicode.Han, r) ||
unicode.Is(unicode.Lm, r) ||
unicode.Is(unicode.Hangul, r)
unicode.Is(unicode.Hangul, r) ||
unicode.Is(CJKSymbolsAndPunctuation, r)
}
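The extra range table adds the CJK Symbols and Punctuation block (U+3000 to U+303F), so characters such as the ideographic comma now count as wide even though they are not in the Hiragana, Katakana, Han, or Hangul tables checked before. A small check of that behaviour:

```go
package main

import (
	"fmt"
	"unicode"
)

func main() {
	// U+3001 (ideographic comma) lies in the CJK Symbols and Punctuation
	// block covered by the new range table, but not in unicode.Han.
	cjkSymbols := &unicode.RangeTable{
		R16: []unicode.Range16{{Lo: 0x3000, Hi: 0x303F, Stride: 1}},
	}
	fmt.Println(unicode.Is(unicode.Han, '、')) // false
	fmt.Println(unicode.Is(cjkSymbols, '、'))  // true
}
```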
// A BufWriter is a subset of the bufio.Writer .
@@ -862,7 +871,7 @@ type PrioritizedValue struct {
Priority int
}
// PrioritizedSlice is a slice of the PrioritizedValues
// PrioritizedSlice is a slice of the PrioritizedValues.
type PrioritizedSlice []PrioritizedValue
// Sort sorts the PrioritizedSlice in ascending order.
@@ -977,7 +986,7 @@ func (s *bytesFilter) Contains(b []byte) bool {
}
h := bytesHash(b) % uint64(len(s.slots))
slot := s.slots[h]
if slot == nil || len(slot) == 0 {
if len(slot) == 0 {
return false
}
for _, element := range slot {

View File

@@ -1,3 +1,4 @@
//go:build appengine || js
// +build appengine js
package util

View File

@@ -1,3 +1,4 @@
//go:build !appengine && !js
// +build !appengine,!js
package util
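These last hunks add the Go 1.17 `//go:build` form alongside the legacy `// +build` comment; gofmt keeps the two lines in sync, and they differ only in boolean syntax (`&&`, `||`, `!` versus comma for AND and space for OR). A hypothetical file restricted to Linux or macOS would carry both forms like this:

```go
// Hypothetical example: both constraint syntaxes expressing "linux OR darwin".

//go:build linux || darwin
// +build linux darwin

package util
```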