update deps; experiment: log security

This commit is contained in:
Aine
2022-11-16 23:00:58 +02:00
parent 225ba2ee9b
commit 99a89ef87a
55 changed files with 883 additions and 308 deletions

View File

@@ -83,6 +83,11 @@ func initBot(cfg *config.Config) {
Dialect: cfg.DB.Dialect, Dialect: cfg.DB.Dialect,
NoEncryption: cfg.NoEncryption, NoEncryption: cfg.NoEncryption,
AccountDataSecret: cfg.DataSecret, AccountDataSecret: cfg.DataSecret,
AccountDataLogReplace: map[string]string{
"password": "<redacted>",
"dkim.pem": "<redacted>",
"dkim.pub": "<redacted>",
},
LPLogger: mxlog, LPLogger: mxlog,
APILogger: logger.New("api.", "INFO"), APILogger: logger.New("api.", "INFO"),
StoreLogger: logger.New("store.", "INFO"), StoreLogger: logger.New("store.", "INFO"),

10
go.mod
View File

@@ -12,7 +12,7 @@ require (
github.com/getsentry/sentry-go v0.13.0 github.com/getsentry/sentry-go v0.13.0
github.com/jhillyerd/enmime v0.10.0 github.com/jhillyerd/enmime v0.10.0
github.com/lib/pq v1.10.7 github.com/lib/pq v1.10.7
github.com/mattn/go-sqlite3 v1.14.15 github.com/mattn/go-sqlite3 v1.14.16
github.com/mileusna/crontab v1.2.0 github.com/mileusna/crontab v1.2.0
github.com/raja/argon2pw v1.0.2-0.20210910183755-a391af63bd39 github.com/raja/argon2pw v1.0.2-0.20210910183755-a391af63bd39
gitlab.com/etke.cc/go/env v1.0.0 gitlab.com/etke.cc/go/env v1.0.0
@@ -21,9 +21,9 @@ require (
gitlab.com/etke.cc/go/secgen v1.1.1 gitlab.com/etke.cc/go/secgen v1.1.1
gitlab.com/etke.cc/go/trysmtp v1.0.0 gitlab.com/etke.cc/go/trysmtp v1.0.0
gitlab.com/etke.cc/go/validator v1.0.3 gitlab.com/etke.cc/go/validator v1.0.3
gitlab.com/etke.cc/linkpearl v0.0.0-20221115164843-97f1e49414d9 gitlab.com/etke.cc/linkpearl v0.0.0-20221116205701-65547c5608e6
golang.org/x/net v0.2.0 golang.org/x/net v0.2.0
maunium.net/go/mautrix v0.12.2 maunium.net/go/mautrix v0.12.3
) )
require ( require (
@@ -47,8 +47,8 @@ require (
github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect github.com/tidwall/sjson v1.2.5 // indirect
github.com/yuin/goldmark v1.5.2 // indirect github.com/yuin/goldmark v1.5.3 // indirect
golang.org/x/crypto v0.2.0 // indirect golang.org/x/crypto v0.3.0 // indirect
golang.org/x/sys v0.2.0 // indirect golang.org/x/sys v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect golang.org/x/text v0.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect

22
go.sum
View File

@@ -50,8 +50,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a h1:eU8j/ClY2Ty3qdHnn0TyW3ivFoPC/0F1gQZz8yTxbbE= github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a h1:eU8j/ClY2Ty3qdHnn0TyW3ivFoPC/0F1gQZz8yTxbbE=
github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a/go.mod h1:v8eSC2SMp9/7FTKUncp7fH9IwPfw+ysMObcEz5FWheQ= github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a/go.mod h1:v8eSC2SMp9/7FTKUncp7fH9IwPfw+ysMObcEz5FWheQ=
github.com/mileusna/crontab v1.2.0 h1:x9ZmE2A4p6CDqMEGQ+GbqsNtnmbdmWMQYShdQu8LvrU= github.com/mileusna/crontab v1.2.0 h1:x9ZmE2A4p6CDqMEGQ+GbqsNtnmbdmWMQYShdQu8LvrU=
@@ -76,7 +76,7 @@ github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02n
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -87,8 +87,8 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/yuin/goldmark v1.5.2 h1:ALmeCk/px5FSm1MAcFBAsVKZjDuMVj8Tm7FFIlMJnqU= github.com/yuin/goldmark v1.5.3 h1:3HUJmBFbQW9fhQOzMgseU134xfi6hU+mjWywx5Ty+/M=
github.com/yuin/goldmark v1.5.2/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.5.3/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
gitlab.com/etke.cc/go/env v1.0.0 h1:J98BwzOuELnjsVPFvz5wa79L7IoRV9CmrS41xLYXtSw= gitlab.com/etke.cc/go/env v1.0.0 h1:J98BwzOuELnjsVPFvz5wa79L7IoRV9CmrS41xLYXtSw=
gitlab.com/etke.cc/go/env v1.0.0/go.mod h1:e1l4RM5MA1sc0R1w/RBDAESWRwgo5cOG9gx8BKUn2C4= gitlab.com/etke.cc/go/env v1.0.0/go.mod h1:e1l4RM5MA1sc0R1w/RBDAESWRwgo5cOG9gx8BKUn2C4=
gitlab.com/etke.cc/go/logger v1.1.0 h1:Yngp/DDLmJ0jJNLvLXrfan5Gi5QV+r7z6kCczTv8t4U= gitlab.com/etke.cc/go/logger v1.1.0 h1:Yngp/DDLmJ0jJNLvLXrfan5Gi5QV+r7z6kCczTv8t4U=
@@ -101,11 +101,11 @@ gitlab.com/etke.cc/go/trysmtp v1.0.0 h1:f/7gSmzohKniVeLSLevI+ZsySYcPUGkT9cRlOTwj
gitlab.com/etke.cc/go/trysmtp v1.0.0/go.mod h1:KqRuIB2IPElEEbAxXmFyKtm7S5YiuEb4lxwWthccqyE= gitlab.com/etke.cc/go/trysmtp v1.0.0/go.mod h1:KqRuIB2IPElEEbAxXmFyKtm7S5YiuEb4lxwWthccqyE=
gitlab.com/etke.cc/go/validator v1.0.3 h1:qkMskwtA3Uiv1q7HTlNZaaZcIJTO4mp2p0KZAl53Xmo= gitlab.com/etke.cc/go/validator v1.0.3 h1:qkMskwtA3Uiv1q7HTlNZaaZcIJTO4mp2p0KZAl53Xmo=
gitlab.com/etke.cc/go/validator v1.0.3/go.mod h1:3vdssRG4LwgdTr9IHz9MjGSEO+3/FO9hXPGMuSeweJ8= gitlab.com/etke.cc/go/validator v1.0.3/go.mod h1:3vdssRG4LwgdTr9IHz9MjGSEO+3/FO9hXPGMuSeweJ8=
gitlab.com/etke.cc/linkpearl v0.0.0-20221115164843-97f1e49414d9 h1:U0xXVnRvXYxz/rndKIaksw5DwEV8KBBFjrV5k975Jiw= gitlab.com/etke.cc/linkpearl v0.0.0-20221116205701-65547c5608e6 h1:+HDT2/bx3Hug++aeDE/PaoRRcnKdYzEm6i2RlOAzPXo=
gitlab.com/etke.cc/linkpearl v0.0.0-20221115164843-97f1e49414d9/go.mod h1:ClA7UlRUoeydy0a7AbJrGAdWYOX//twuThny198PA1k= gitlab.com/etke.cc/linkpearl v0.0.0-20221116205701-65547c5608e6/go.mod h1:Dgtu0qvymNjjky4Bu5WC8+iSohcb5xZ9CtkD3ezDqIA=
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/net v0.0.0-20210501142056-aec3718b3fa0/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210501142056-aec3718b3fa0/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -136,5 +136,5 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
maunium.net/go/maulogger/v2 v2.3.2 h1:1XmIYmMd3PoQfp9J+PaHhpt80zpfmMqaShzUTC7FwY0= maunium.net/go/maulogger/v2 v2.3.2 h1:1XmIYmMd3PoQfp9J+PaHhpt80zpfmMqaShzUTC7FwY0=
maunium.net/go/maulogger/v2 v2.3.2/go.mod h1:TYWy7wKwz/tIXTpsx8G3mZseIRiC5DoMxSZazOHy68A= maunium.net/go/maulogger/v2 v2.3.2/go.mod h1:TYWy7wKwz/tIXTpsx8G3mZseIRiC5DoMxSZazOHy68A=
maunium.net/go/mautrix v0.12.2 h1:HuIDgigR6VY2QUPyZADCwn8UZWYAqi31a77qd1jMPA4= maunium.net/go/mautrix v0.12.3 h1:pUeO1ThhtZxE6XibGCzDhRuxwDIFNugsreVr1yYq96k=
maunium.net/go/mautrix v0.12.2/go.mod h1:bCw45Qx/m9qsz7eazmbe7Rzq5ZbTPzwRE1UgX2S9DXs= maunium.net/go/mautrix v0.12.3/go.mod h1:uOUjkOjm2C+nQS3mr9B5ATjqemZfnPHvjdd1kZezAwg=

View File

@@ -1,7 +1,7 @@
go-sqlite3 go-sqlite3
========== ==========
[![GoDoc Reference](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3) [![Go Reference](https://pkg.go.dev/badge/github.com/mattn/go-sqlite3.svg)](https://pkg.go.dev/github.com/mattn/go-sqlite3)
[![GitHub Actions](https://github.com/mattn/go-sqlite3/workflows/Go/badge.svg)](https://github.com/mattn/go-sqlite3/actions?query=workflow%3AGo) [![GitHub Actions](https://github.com/mattn/go-sqlite3/workflows/Go/badge.svg)](https://github.com/mattn/go-sqlite3/actions?query=workflow%3AGo)
[![Financial Contributors on Open Collective](https://opencollective.com/mattn-go-sqlite3/all/badge.svg?label=financial+contributors)](https://opencollective.com/mattn-go-sqlite3) [![Financial Contributors on Open Collective](https://opencollective.com/mattn-go-sqlite3/all/badge.svg?label=financial+contributors)](https://opencollective.com/mattn-go-sqlite3)
[![codecov](https://codecov.io/gh/mattn/go-sqlite3/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-sqlite3) [![codecov](https://codecov.io/gh/mattn/go-sqlite3/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-sqlite3)
@@ -172,15 +172,18 @@ go build --tags "icu json1 fts5 secure_delete"
| International Components for Unicode | sqlite_icu | This option causes the International Components for Unicode or "ICU" extension to SQLite to be added to the build | | International Components for Unicode | sqlite_icu | This option causes the International Components for Unicode or "ICU" extension to SQLite to be added to the build |
| Introspect PRAGMAS | sqlite_introspect | This option adds some extra PRAGMA statements. <ul><li>PRAGMA function_list</li><li>PRAGMA module_list</li><li>PRAGMA pragma_list</li></ul> | | Introspect PRAGMAS | sqlite_introspect | This option adds some extra PRAGMA statements. <ul><li>PRAGMA function_list</li><li>PRAGMA module_list</li><li>PRAGMA pragma_list</li></ul> |
| JSON SQL Functions | sqlite_json | When this option is defined in the amalgamation, the JSON SQL functions are added to the build automatically | | JSON SQL Functions | sqlite_json | When this option is defined in the amalgamation, the JSON SQL functions are added to the build automatically |
| Math Functions | sqlite_math_functions | This compile-time option enables built-in scalar math functions. For more information see [Built-In Mathematical SQL Functions](https://www.sqlite.org/lang_mathfunc.html) |
| OS Trace | sqlite_os_trace | This option enables OSTRACE() debug logging. This can be verbose and should not be used in production. |
| Pre Update Hook | sqlite_preupdate_hook | Registers a callback function that is invoked prior to each INSERT, UPDATE, and DELETE operation on a database table. | | Pre Update Hook | sqlite_preupdate_hook | Registers a callback function that is invoked prior to each INSERT, UPDATE, and DELETE operation on a database table. |
| Secure Delete | sqlite_secure_delete | This compile-time option changes the default setting of the secure_delete pragma.<br><br>When this option is not used, secure_delete defaults to off. When this option is present, secure_delete defaults to on.<br><br>The secure_delete setting causes deleted content to be overwritten with zeros. There is a small performance penalty since additional I/O must occur.<br><br>On the other hand, secure_delete can prevent fragments of sensitive information from lingering in unused parts of the database file after it has been deleted. See the documentation on the secure_delete pragma for additional information | | Secure Delete | sqlite_secure_delete | This compile-time option changes the default setting of the secure_delete pragma.<br><br>When this option is not used, secure_delete defaults to off. When this option is present, secure_delete defaults to on.<br><br>The secure_delete setting causes deleted content to be overwritten with zeros. There is a small performance penalty since additional I/O must occur.<br><br>On the other hand, secure_delete can prevent fragments of sensitive information from lingering in unused parts of the database file after it has been deleted. See the documentation on the secure_delete pragma for additional information |
| Secure Delete (FAST) | sqlite_secure_delete_fast | For more information see [PRAGMA secure_delete](https://www.sqlite.org/pragma.html#pragma_secure_delete) | | Secure Delete (FAST) | sqlite_secure_delete_fast | For more information see [PRAGMA secure_delete](https://www.sqlite.org/pragma.html#pragma_secure_delete) |
| Tracing / Debug | sqlite_trace | Activate trace functions | | Tracing / Debug | sqlite_trace | Activate trace functions |
| User Authentication | sqlite_userauth | SQLite User Authentication see [User Authentication](#user-authentication) for more information. | | User Authentication | sqlite_userauth | SQLite User Authentication see [User Authentication](#user-authentication) for more information. |
| Virtual Tables | sqlite_vtable | SQLite Virtual Tables see [SQLite Official VTABLE Documentation](https://www.sqlite.org/vtab.html) for more information, and a [full example here](https://github.com/mattn/go-sqlite3/tree/master/_example/vtable) |
# Compilation # Compilation
This package requires the `CGO_ENABLED=1` ennvironment variable if not set by default, and the presence of the `gcc` compiler. This package requires the `CGO_ENABLED=1` environment variable if not set by default, and the presence of the `gcc` compiler.
If you need to add additional CFLAGS or LDFLAGS to the build command, and do not want to modify this package, then this can be achieved by using the `CGO_CFLAGS` and `CGO_LDFLAGS` environment variables. If you need to add additional CFLAGS or LDFLAGS to the build command, and do not want to modify this package, then this can be achieved by using the `CGO_CFLAGS` and `CGO_LDFLAGS` environment variables.
@@ -216,14 +219,13 @@ This library can be cross-compiled.
In some cases you are required to the `CC` environment variable with the cross compiler. In some cases you are required to the `CC` environment variable with the cross compiler.
## Cross Compiling from MAC OSX ## Cross Compiling from MAC OSX
The simplest way to cross compile from OSX is to use [xgo](https://github.com/karalabe/xgo). The simplest way to cross compile from OSX is to use [musl-cross](https://github.com/FiloSottile/homebrew-musl-cross).
Steps: Steps:
- Install [xgo](https://github.com/karalabe/xgo) (`go get github.com/karalabe/xgo`). - Install [musl-cross](https://github.com/FiloSottile/homebrew-musl-cross) (`brew install FiloSottile/musl-cross/musl-cross`).
- Ensure that your project is within your `GOPATH`. - Run `CC=x86_64-linux-musl-gcc CXX=x86_64-linux-musl-g++ GOARCH=amd64 GOOS=linux CGO_ENABLED=1 go build -ldflags "-linkmode external -extldflags -static"`.
- Run `xgo local/path/to/project`.
Please refer to the project's [README](https://github.com/karalabe/xgo/blob/master/README.md) for further information. Please refer to the project's [README](https://github.com/FiloSottile/homebrew-musl-cross#readme) for further information.
# Google Cloud Platform # Google Cloud Platform

View File

@@ -1,7 +1,7 @@
#ifndef USE_LIBSQLITE3 #ifndef USE_LIBSQLITE3
/****************************************************************************** /******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite ** This file is an amalgamation of many separate C source files from SQLite
** version 3.39.2. By combining all the individual C code files into this ** version 3.39.4. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation ** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be ** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements ** possible if the files were compiled separately. Performance improvements
@@ -453,9 +453,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()]. ** [sqlite_version()] and [sqlite_source_id()].
*/ */
#define SQLITE_VERSION "3.39.2" #define SQLITE_VERSION "3.39.4"
#define SQLITE_VERSION_NUMBER 3039002 #define SQLITE_VERSION_NUMBER 3039004
#define SQLITE_SOURCE_ID "2022-07-21 15:24:47 698edb77537b67c41adc68f9b892db56bcf9a55e00371a61420f3ddd668e6603" #define SQLITE_SOURCE_ID "2022-09-29 15:55:41 a29f9949895322123f7c38fbe94c649a9d6e6c9cd0c3b41c96d694552f26b309"
/* /*
** CAPI3REF: Run-Time Library Version Numbers ** CAPI3REF: Run-Time Library Version Numbers
@@ -13145,6 +13145,11 @@ struct fts5_api {
/************** End of sqlite3.h *********************************************/ /************** End of sqlite3.h *********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/ /************** Continuing where we left off in sqliteInt.h ******************/
/*
** Reuse the STATIC_LRU for mutex access to sqlite3_temp_directory.
*/
#define SQLITE_MUTEX_STATIC_TEMPDIR SQLITE_MUTEX_STATIC_VFS1
/* /*
** Include the configuration header output by 'configure' if we're using the ** Include the configuration header output by 'configure' if we're using the
** autoconf-based build ** autoconf-based build
@@ -29564,8 +29569,13 @@ SQLITE_PRIVATE void *sqlite3OomFault(sqlite3 *db){
} }
DisableLookaside; DisableLookaside;
if( db->pParse ){ if( db->pParse ){
Parse *pParse;
sqlite3ErrorMsg(db->pParse, "out of memory"); sqlite3ErrorMsg(db->pParse, "out of memory");
db->pParse->rc = SQLITE_NOMEM_BKPT; db->pParse->rc = SQLITE_NOMEM_BKPT;
for(pParse=db->pParse->pOuterParse; pParse; pParse = pParse->pOuterParse){
pParse->nErr++;
pParse->rc = SQLITE_NOMEM;
}
} }
} }
return 0; return 0;
@@ -33460,7 +33470,7 @@ SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){
va_list ap; va_list ap;
sqlite3 *db = pParse->db; sqlite3 *db = pParse->db;
assert( db!=0 ); assert( db!=0 );
assert( db->pParse==pParse ); assert( db->pParse==pParse || db->pParse->pToplevel==pParse );
db->errByteOffset = -2; db->errByteOffset = -2;
va_start(ap, zFormat); va_start(ap, zFormat);
zMsg = sqlite3VMPrintf(db, zFormat, ap); zMsg = sqlite3VMPrintf(db, zFormat, ap);
@@ -41321,6 +41331,7 @@ static const char *unixTempFileDir(void){
static int unixGetTempname(int nBuf, char *zBuf){ static int unixGetTempname(int nBuf, char *zBuf){
const char *zDir; const char *zDir;
int iLimit = 0; int iLimit = 0;
int rc = SQLITE_OK;
/* It's odd to simulate an io-error here, but really this is just /* It's odd to simulate an io-error here, but really this is just
** using the io-error infrastructure to test that SQLite handles this ** using the io-error infrastructure to test that SQLite handles this
@@ -41329,8 +41340,11 @@ static int unixGetTempname(int nBuf, char *zBuf){
zBuf[0] = 0; zBuf[0] = 0;
SimulateIOError( return SQLITE_IOERR ); SimulateIOError( return SQLITE_IOERR );
sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
zDir = unixTempFileDir(); zDir = unixTempFileDir();
if( zDir==0 ) return SQLITE_IOERR_GETTEMPPATH; if( zDir==0 ){
rc = SQLITE_IOERR_GETTEMPPATH;
}else{
do{ do{
u64 r; u64 r;
sqlite3_randomness(sizeof(r), &r); sqlite3_randomness(sizeof(r), &r);
@@ -41338,9 +41352,14 @@ static int unixGetTempname(int nBuf, char *zBuf){
zBuf[nBuf-2] = 0; zBuf[nBuf-2] = 0;
sqlite3_snprintf(nBuf, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX"%llx%c", sqlite3_snprintf(nBuf, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX"%llx%c",
zDir, r, 0); zDir, r, 0);
if( zBuf[nBuf-2]!=0 || (iLimit++)>10 ) return SQLITE_ERROR; if( zBuf[nBuf-2]!=0 || (iLimit++)>10 ){
rc = SQLITE_ERROR;
break;
}
}while( osAccess(zBuf,0)==0 ); }while( osAccess(zBuf,0)==0 );
return SQLITE_OK; }
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
return rc;
} }
#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
@@ -45479,10 +45498,12 @@ SQLITE_API int sqlite3_win32_set_directory8(
const char *zValue /* New value for directory being set or reset */ const char *zValue /* New value for directory being set or reset */
){ ){
char **ppDirectory = 0; char **ppDirectory = 0;
int rc;
#ifndef SQLITE_OMIT_AUTOINIT #ifndef SQLITE_OMIT_AUTOINIT
int rc = sqlite3_initialize(); rc = sqlite3_initialize();
if( rc ) return rc; if( rc ) return rc;
#endif #endif
sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
if( type==SQLITE_WIN32_DATA_DIRECTORY_TYPE ){ if( type==SQLITE_WIN32_DATA_DIRECTORY_TYPE ){
ppDirectory = &sqlite3_data_directory; ppDirectory = &sqlite3_data_directory;
}else if( type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ){ }else if( type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ){
@@ -45497,14 +45518,19 @@ SQLITE_API int sqlite3_win32_set_directory8(
if( zValue && zValue[0] ){ if( zValue && zValue[0] ){
zCopy = sqlite3_mprintf("%s", zValue); zCopy = sqlite3_mprintf("%s", zValue);
if ( zCopy==0 ){ if ( zCopy==0 ){
return SQLITE_NOMEM_BKPT; rc = SQLITE_NOMEM_BKPT;
goto set_directory8_done;
} }
} }
sqlite3_free(*ppDirectory); sqlite3_free(*ppDirectory);
*ppDirectory = zCopy; *ppDirectory = zCopy;
return SQLITE_OK; rc = SQLITE_OK;
}else{
rc = SQLITE_ERROR;
} }
return SQLITE_ERROR; set_directory8_done:
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
return rc;
} }
/* /*
@@ -48278,6 +48304,18 @@ static int winMakeEndInDirSep(int nBuf, char *zBuf){
return 0; return 0;
} }
/*
** If sqlite3_temp_directory is not, take the mutex and return true.
**
** If sqlite3_temp_directory is NULL, omit the mutex and return false.
*/
static int winTempDirDefined(void){
sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
if( sqlite3_temp_directory!=0 ) return 1;
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
return 0;
}
/* /*
** Create a temporary file name and store the resulting pointer into pzBuf. ** Create a temporary file name and store the resulting pointer into pzBuf.
** The pointer returned in pzBuf must be freed via sqlite3_free(). ** The pointer returned in pzBuf must be freed via sqlite3_free().
@@ -48314,20 +48352,23 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
*/ */
nDir = nMax - (nPre + 15); nDir = nMax - (nPre + 15);
assert( nDir>0 ); assert( nDir>0 );
if( sqlite3_temp_directory ){ if( winTempDirDefined() ){
int nDirLen = sqlite3Strlen30(sqlite3_temp_directory); int nDirLen = sqlite3Strlen30(sqlite3_temp_directory);
if( nDirLen>0 ){ if( nDirLen>0 ){
if( !winIsDirSep(sqlite3_temp_directory[nDirLen-1]) ){ if( !winIsDirSep(sqlite3_temp_directory[nDirLen-1]) ){
nDirLen++; nDirLen++;
} }
if( nDirLen>nDir ){ if( nDirLen>nDir ){
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
sqlite3_free(zBuf); sqlite3_free(zBuf);
OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n")); OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
return winLogError(SQLITE_ERROR, 0, "winGetTempname1", 0); return winLogError(SQLITE_ERROR, 0, "winGetTempname1", 0);
} }
sqlite3_snprintf(nMax, zBuf, "%s", sqlite3_temp_directory); sqlite3_snprintf(nMax, zBuf, "%s", sqlite3_temp_directory);
} }
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
} }
#if defined(__CYGWIN__) #if defined(__CYGWIN__)
else{ else{
static const char *azDirs[] = { static const char *azDirs[] = {
@@ -49116,7 +49157,7 @@ static BOOL winIsVerbatimPathname(
** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname ** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname
** bytes in size. ** bytes in size.
*/ */
static int winFullPathname( static int winFullPathnameNoMutex(
sqlite3_vfs *pVfs, /* Pointer to vfs object */ sqlite3_vfs *pVfs, /* Pointer to vfs object */
const char *zRelative, /* Possibly relative input path */ const char *zRelative, /* Possibly relative input path */
int nFull, /* Size of output buffer in bytes */ int nFull, /* Size of output buffer in bytes */
@@ -49295,6 +49336,19 @@ static int winFullPathname(
} }
#endif #endif
} }
static int winFullPathname(
sqlite3_vfs *pVfs, /* Pointer to vfs object */
const char *zRelative, /* Possibly relative input path */
int nFull, /* Size of output buffer in bytes */
char *zFull /* Output buffer */
){
int rc;
sqlite3_mutex *pMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR);
sqlite3_mutex_enter(pMutex);
rc = winFullPathnameNoMutex(pVfs, zRelative, nFull, zFull);
sqlite3_mutex_leave(pMutex);
return rc;
}
#ifndef SQLITE_OMIT_LOAD_EXTENSION #ifndef SQLITE_OMIT_LOAD_EXTENSION
/* /*
@@ -51639,14 +51693,24 @@ SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *pCache){
*/ */
SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
PCache *pCache = p->pCache; PCache *pCache = p->pCache;
sqlite3_pcache_page *pOther;
assert( p->nRef>0 ); assert( p->nRef>0 );
assert( newPgno>0 ); assert( newPgno>0 );
assert( sqlite3PcachePageSanity(p) ); assert( sqlite3PcachePageSanity(p) );
pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno)); pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
pOther = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, newPgno, 0);
if( pOther ){
PgHdr *pXPage = (PgHdr*)pOther->pExtra;
assert( pXPage->nRef==0 );
pXPage->nRef++;
pCache->nRefSum++;
sqlite3PcacheDrop(pXPage);
}
sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
p->pgno = newPgno; p->pgno = newPgno;
if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
assert( sqlite3PcachePageSanity(p) );
} }
} }
@@ -53028,23 +53092,26 @@ static void pcache1Rekey(
PCache1 *pCache = (PCache1 *)p; PCache1 *pCache = (PCache1 *)p;
PgHdr1 *pPage = (PgHdr1 *)pPg; PgHdr1 *pPage = (PgHdr1 *)pPg;
PgHdr1 **pp; PgHdr1 **pp;
unsigned int h; unsigned int hOld, hNew;
assert( pPage->iKey==iOld ); assert( pPage->iKey==iOld );
assert( pPage->pCache==pCache ); assert( pPage->pCache==pCache );
assert( iOld!=iNew ); /* The page number really is changing */
pcache1EnterMutex(pCache->pGroup); pcache1EnterMutex(pCache->pGroup);
h = iOld%pCache->nHash; assert( pcache1FetchNoMutex(p, iOld, 0)==pPage ); /* pPg really is iOld */
pp = &pCache->apHash[h]; hOld = iOld%pCache->nHash;
pp = &pCache->apHash[hOld];
while( (*pp)!=pPage ){ while( (*pp)!=pPage ){
pp = &(*pp)->pNext; pp = &(*pp)->pNext;
} }
*pp = pPage->pNext; *pp = pPage->pNext;
h = iNew%pCache->nHash; assert( pcache1FetchNoMutex(p, iNew, 0)==0 ); /* iNew not in cache */
hNew = iNew%pCache->nHash;
pPage->iKey = iNew; pPage->iKey = iNew;
pPage->pNext = pCache->apHash[h]; pPage->pNext = pCache->apHash[hNew];
pCache->apHash[h] = pPage; pCache->apHash[hNew] = pPage;
if( iNew>pCache->iMaxKey ){ if( iNew>pCache->iMaxKey ){
pCache->iMaxKey = iNew; pCache->iMaxKey = iNew;
} }
@@ -59677,6 +59744,7 @@ static int pager_open_journal(Pager *pPager){
if( rc!=SQLITE_OK ){ if( rc!=SQLITE_OK ){
sqlite3BitvecDestroy(pPager->pInJournal); sqlite3BitvecDestroy(pPager->pInJournal);
pPager->pInJournal = 0; pPager->pInJournal = 0;
pPager->journalOff = 0;
}else{ }else{
assert( pPager->eState==PAGER_WRITER_LOCKED ); assert( pPager->eState==PAGER_WRITER_LOCKED );
pPager->eState = PAGER_WRITER_CACHEMOD; pPager->eState = PAGER_WRITER_CACHEMOD;
@@ -61232,7 +61300,7 @@ SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager *pPager){
SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager *pPager){ SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager *pPager){
assert( assert_pager_state(pPager) ); assert( assert_pager_state(pPager) );
if( pPager->eState>=PAGER_WRITER_CACHEMOD ) return 0; if( pPager->eState>=PAGER_WRITER_CACHEMOD ) return 0;
if( NEVER(isOpen(pPager->jfd) && pPager->journalOff>0) ) return 0; if( isOpen(pPager->jfd) && pPager->journalOff>0 ) return 0;
return 1; return 1;
} }
@@ -68347,7 +68415,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){
if( iFree2+sz2 > usableSize ) return SQLITE_CORRUPT_PAGE(pPage); if( iFree2+sz2 > usableSize ) return SQLITE_CORRUPT_PAGE(pPage);
memmove(&data[iFree+sz+sz2], &data[iFree+sz], iFree2-(iFree+sz)); memmove(&data[iFree+sz+sz2], &data[iFree+sz], iFree2-(iFree+sz));
sz += sz2; sz += sz2;
}else if( NEVER(iFree+sz>usableSize) ){ }else if( iFree+sz>usableSize ){
return SQLITE_CORRUPT_PAGE(pPage); return SQLITE_CORRUPT_PAGE(pPage);
} }
@@ -74703,8 +74771,6 @@ static int balance_nonroot(
Pgno pgno; /* Temp var to store a page number in */ Pgno pgno; /* Temp var to store a page number in */
u8 abDone[NB+2]; /* True after i'th new page is populated */ u8 abDone[NB+2]; /* True after i'th new page is populated */
Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */ Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */
Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */
u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */
CellArray b; /* Parsed information on cells being balanced */ CellArray b; /* Parsed information on cells being balanced */
memset(abDone, 0, sizeof(abDone)); memset(abDone, 0, sizeof(abDone));
@@ -75128,42 +75194,39 @@ static int balance_nonroot(
** of the table is closer to a linear scan through the file. That in turn ** of the table is closer to a linear scan through the file. That in turn
** helps the operating system to deliver pages from the disk more rapidly. ** helps the operating system to deliver pages from the disk more rapidly.
** **
** An O(n^2) insertion sort algorithm is used, but since n is never more ** An O(N*N) sort algorithm is used, but since N is never more than NB+2
** than (NB+2) (a small constant), that should not be a problem. ** (5), that is not a performance concern.
** **
** When NB==3, this one optimization makes the database about 25% faster ** When NB==3, this one optimization makes the database about 25% faster
** for large insertions and deletions. ** for large insertions and deletions.
*/ */
for(i=0; i<nNew; i++){ for(i=0; i<nNew; i++){
aPgOrder[i] = aPgno[i] = apNew[i]->pgno; aPgno[i] = apNew[i]->pgno;
aPgFlags[i] = apNew[i]->pDbPage->flags; assert( apNew[i]->pDbPage->flags & PGHDR_WRITEABLE );
for(j=0; j<i; j++){ assert( apNew[i]->pDbPage->flags & PGHDR_DIRTY );
if( NEVER(aPgno[j]==aPgno[i]) ){
/* This branch is taken if the set of sibling pages somehow contains
** duplicate entries. This can happen if the database is corrupt.
** It would be simpler to detect this as part of the loop below, but
** we do the detection here in order to avoid populating the pager
** cache with two separate objects associated with the same
** page number. */
assert( CORRUPT_DB );
rc = SQLITE_CORRUPT_BKPT;
goto balance_cleanup;
} }
for(i=0; i<nNew-1; i++){
int iB = i;
for(j=i+1; j<nNew; j++){
if( apNew[j]->pgno < apNew[iB]->pgno ) iB = j;
} }
}
for(i=0; i<nNew; i++){ /* If apNew[i] has a page number that is bigger than any of the
int iBest = 0; /* aPgno[] index of page number to use */ ** subsequence apNew[i] entries, then swap apNew[i] with the subsequent
for(j=1; j<nNew; j++){ ** entry that has the smallest page number (which we know to be
if( aPgOrder[j]<aPgOrder[iBest] ) iBest = j; ** entry apNew[iB]).
} */
pgno = aPgOrder[iBest]; if( iB!=i ){
aPgOrder[iBest] = 0xffffffff; Pgno pgnoA = apNew[i]->pgno;
if( iBest!=i ){ Pgno pgnoB = apNew[iB]->pgno;
if( iBest>i ){ Pgno pgnoTemp = (PENDING_BYTE/pBt->pageSize)+1;
sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0); u16 fgA = apNew[i]->pDbPage->flags;
} u16 fgB = apNew[iB]->pDbPage->flags;
sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]); sqlite3PagerRekey(apNew[i]->pDbPage, pgnoTemp, fgB);
apNew[i]->pgno = pgno; sqlite3PagerRekey(apNew[iB]->pDbPage, pgnoA, fgA);
sqlite3PagerRekey(apNew[i]->pDbPage, pgnoB, fgB);
apNew[i]->pgno = pgnoB;
apNew[iB]->pgno = pgnoA;
} }
} }
@@ -81036,6 +81099,7 @@ SQLITE_PRIVATE int sqlite3VdbeAddFunctionCall(
addr = sqlite3VdbeAddOp4(v, eCallCtx ? OP_PureFunc : OP_Function, addr = sqlite3VdbeAddOp4(v, eCallCtx ? OP_PureFunc : OP_Function,
p1, p2, p3, (char*)pCtx, P4_FUNCCTX); p1, p2, p3, (char*)pCtx, P4_FUNCCTX);
sqlite3VdbeChangeP5(v, eCallCtx & NC_SelfRef); sqlite3VdbeChangeP5(v, eCallCtx & NC_SelfRef);
sqlite3MayAbort(pParse);
return addr; return addr;
} }
@@ -81371,6 +81435,7 @@ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
|| opcode==OP_VDestroy || opcode==OP_VDestroy
|| opcode==OP_VCreate || opcode==OP_VCreate
|| opcode==OP_ParseSchema || opcode==OP_ParseSchema
|| opcode==OP_Function || opcode==OP_PureFunc
|| ((opcode==OP_Halt || opcode==OP_HaltIfNull) || ((opcode==OP_Halt || opcode==OP_HaltIfNull)
&& ((pOp->p1)!=SQLITE_OK && pOp->p2==OE_Abort)) && ((pOp->p1)!=SQLITE_OK && pOp->p2==OE_Abort))
){ ){
@@ -132705,6 +132770,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
** **
*/ */
case PragTyp_TEMP_STORE_DIRECTORY: { case PragTyp_TEMP_STORE_DIRECTORY: {
sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
if( !zRight ){ if( !zRight ){
returnSingleText(v, sqlite3_temp_directory); returnSingleText(v, sqlite3_temp_directory);
}else{ }else{
@@ -132714,6 +132780,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
if( rc!=SQLITE_OK || res==0 ){ if( rc!=SQLITE_OK || res==0 ){
sqlite3ErrorMsg(pParse, "not a writable directory"); sqlite3ErrorMsg(pParse, "not a writable directory");
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
goto pragma_out; goto pragma_out;
} }
} }
@@ -132731,6 +132798,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
} }
#endif /* SQLITE_OMIT_WSD */ #endif /* SQLITE_OMIT_WSD */
} }
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
break; break;
} }
@@ -132749,6 +132817,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
** **
*/ */
case PragTyp_DATA_STORE_DIRECTORY: { case PragTyp_DATA_STORE_DIRECTORY: {
sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
if( !zRight ){ if( !zRight ){
returnSingleText(v, sqlite3_data_directory); returnSingleText(v, sqlite3_data_directory);
}else{ }else{
@@ -132758,6 +132827,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
if( rc!=SQLITE_OK || res==0 ){ if( rc!=SQLITE_OK || res==0 ){
sqlite3ErrorMsg(pParse, "not a writable directory"); sqlite3ErrorMsg(pParse, "not a writable directory");
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
goto pragma_out; goto pragma_out;
} }
} }
@@ -132769,6 +132839,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
} }
#endif /* SQLITE_OMIT_WSD */ #endif /* SQLITE_OMIT_WSD */
} }
sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
break; break;
} }
#endif #endif
@@ -137214,7 +137285,7 @@ static void generateSortTail(
if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce); if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak); addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak);
VdbeCoverage(v); VdbeCoverage(v);
codeOffset(v, p->iOffset, addrContinue); assert( p->iLimit==0 && p->iOffset==0 );
sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab); sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab);
bSeq = 0; bSeq = 0;
}else{ }else{
@@ -137222,6 +137293,9 @@ static void generateSortTail(
codeOffset(v, p->iOffset, addrContinue); codeOffset(v, p->iOffset, addrContinue);
iSortTab = iTab; iSortTab = iTab;
bSeq = 1; bSeq = 1;
if( p->iOffset>0 ){
sqlite3VdbeAddOp2(v, OP_AddImm, p->iLimit, -1);
}
} }
for(i=0, iCol=nKey+bSeq-1; i<nColumn; i++){ for(i=0, iCol=nKey+bSeq-1; i<nColumn; i++){
#ifdef SQLITE_ENABLE_SORTER_REFERENCES #ifdef SQLITE_ENABLE_SORTER_REFERENCES
@@ -139214,10 +139288,11 @@ static int multiSelectOrderBy(
*/ */
sqlite3VdbeResolveLabel(v, labelEnd); sqlite3VdbeResolveLabel(v, labelEnd);
/* Reassembly the compound query so that it will be freed correctly /* Reassemble the compound query so that it will be freed correctly
** by the calling function */ ** by the calling function */
if( pSplit->pPrior ){ if( pSplit->pPrior ){
sqlite3SelectDelete(db, pSplit->pPrior); sqlite3ParserAddCleanup(pParse,
(void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior);
} }
pSplit->pPrior = pPrior; pSplit->pPrior = pPrior;
pPrior->pNext = pSplit; pPrior->pNext = pSplit;
@@ -140736,6 +140811,7 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){
|| p->pSrc->nSrc!=1 || p->pSrc->nSrc!=1
|| p->pSrc->a[0].pSelect || p->pSrc->a[0].pSelect
|| pAggInfo->nFunc!=1 || pAggInfo->nFunc!=1
|| p->pHaving
){ ){
return 0; return 0;
} }
@@ -143973,6 +144049,23 @@ SQLITE_PRIVATE void sqlite3FinishTrigger(
Vdbe *v; Vdbe *v;
char *z; char *z;
/* If this is a new CREATE TABLE statement, and if shadow tables
** are read-only, and the trigger makes a change to a shadow table,
** then raise an error - do not allow the trigger to be created. */
if( sqlite3ReadOnlyShadowTables(db) ){
TriggerStep *pStep;
for(pStep=pTrig->step_list; pStep; pStep=pStep->pNext){
if( pStep->zTarget!=0
&& sqlite3ShadowTableName(db, pStep->zTarget)
){
sqlite3ErrorMsg(pParse,
"trigger \"%s\" may not write to shadow table \"%s\"",
pTrig->zName, pStep->zTarget);
goto triggerfinish_cleanup;
}
}
}
/* Make an entry in the sqlite_schema table */ /* Make an entry in the sqlite_schema table */
v = sqlite3GetVdbe(pParse); v = sqlite3GetVdbe(pParse);
if( v==0 ) goto triggerfinish_cleanup; if( v==0 ) goto triggerfinish_cleanup;
@@ -149784,7 +149877,8 @@ static int codeEqualityTerm(
} }
sqlite3ExprDelete(db, pX); sqlite3ExprDelete(db, pX);
}else{ }else{
aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq); int n = sqlite3ExprVectorSize(pX->pLeft);
aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n));
eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab); eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab);
} }
pX = pExpr; pX = pExpr;
@@ -176937,7 +177031,7 @@ struct Fts3MultiSegReader {
int nAdvance; /* How many seg-readers to advance */ int nAdvance; /* How many seg-readers to advance */
Fts3SegFilter *pFilter; /* Pointer to filter object */ Fts3SegFilter *pFilter; /* Pointer to filter object */
char *aBuffer; /* Buffer to merge doclists in */ char *aBuffer; /* Buffer to merge doclists in */
int nBuffer; /* Allocated size of aBuffer[] in bytes */ i64 nBuffer; /* Allocated size of aBuffer[] in bytes */
int iColFilter; /* If >=0, filter for this column */ int iColFilter; /* If >=0, filter for this column */
int bRestart; int bRestart;
@@ -179633,7 +179727,7 @@ static int fts3TermSelectMerge(
** **
** Similar padding is added in the fts3DoclistOrMerge() function. ** Similar padding is added in the fts3DoclistOrMerge() function.
*/ */
pTS->aaOutput[0] = sqlite3_malloc(nDoclist + FTS3_VARINT_MAX + 1); pTS->aaOutput[0] = sqlite3_malloc64((i64)nDoclist + FTS3_VARINT_MAX + 1);
pTS->anOutput[0] = nDoclist; pTS->anOutput[0] = nDoclist;
if( pTS->aaOutput[0] ){ if( pTS->aaOutput[0] ){
memcpy(pTS->aaOutput[0], aDoclist, nDoclist); memcpy(pTS->aaOutput[0], aDoclist, nDoclist);
@@ -181121,7 +181215,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){
nDistance = iPrev - nMaxUndeferred; nDistance = iPrev - nMaxUndeferred;
} }
aOut = (char *)sqlite3_malloc(nPoslist+8); aOut = (char *)sqlite3Fts3MallocZero(nPoslist+FTS3_BUFFER_PADDING);
if( !aOut ){ if( !aOut ){
sqlite3_free(aPoslist); sqlite3_free(aPoslist);
return SQLITE_NOMEM; return SQLITE_NOMEM;
@@ -181490,7 +181584,7 @@ static int fts3EvalIncrPhraseNext(
if( bEof==0 ){ if( bEof==0 ){
int nList = 0; int nList = 0;
int nByte = a[p->nToken-1].nList; int nByte = a[p->nToken-1].nList;
char *aDoclist = sqlite3_malloc(nByte+FTS3_BUFFER_PADDING); char *aDoclist = sqlite3_malloc64((i64)nByte+FTS3_BUFFER_PADDING);
if( !aDoclist ) return SQLITE_NOMEM; if( !aDoclist ) return SQLITE_NOMEM;
memcpy(aDoclist, a[p->nToken-1].pList, nByte+1); memcpy(aDoclist, a[p->nToken-1].pList, nByte+1);
memset(&aDoclist[nByte], 0, FTS3_BUFFER_PADDING); memset(&aDoclist[nByte], 0, FTS3_BUFFER_PADDING);
@@ -185726,7 +185820,7 @@ static int porterNext(
if( n>c->nAllocated ){ if( n>c->nAllocated ){
char *pNew; char *pNew;
c->nAllocated = n+20; c->nAllocated = n+20;
pNew = sqlite3_realloc(c->zToken, c->nAllocated); pNew = sqlite3_realloc64(c->zToken, c->nAllocated);
if( !pNew ) return SQLITE_NOMEM; if( !pNew ) return SQLITE_NOMEM;
c->zToken = pNew; c->zToken = pNew;
} }
@@ -186478,7 +186572,7 @@ static int simpleNext(
if( n>c->nTokenAllocated ){ if( n>c->nTokenAllocated ){
char *pNew; char *pNew;
c->nTokenAllocated = n+20; c->nTokenAllocated = n+20;
pNew = sqlite3_realloc(c->pToken, c->nTokenAllocated); pNew = sqlite3_realloc64(c->pToken, c->nTokenAllocated);
if( !pNew ) return SQLITE_NOMEM; if( !pNew ) return SQLITE_NOMEM;
c->pToken = pNew; c->pToken = pNew;
} }
@@ -187640,7 +187734,7 @@ static int fts3PendingListAppendVarint(
/* Allocate or grow the PendingList as required. */ /* Allocate or grow the PendingList as required. */
if( !p ){ if( !p ){
p = sqlite3_malloc(sizeof(*p) + 100); p = sqlite3_malloc64(sizeof(*p) + 100);
if( !p ){ if( !p ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -187649,14 +187743,14 @@ static int fts3PendingListAppendVarint(
p->nData = 0; p->nData = 0;
} }
else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){ else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){
int nNew = p->nSpace * 2; i64 nNew = p->nSpace * 2;
p = sqlite3_realloc(p, sizeof(*p) + nNew); p = sqlite3_realloc64(p, sizeof(*p) + nNew);
if( !p ){ if( !p ){
sqlite3_free(*pp); sqlite3_free(*pp);
*pp = 0; *pp = 0;
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
p->nSpace = nNew; p->nSpace = (int)nNew;
p->aData = (char *)&p[1]; p->aData = (char *)&p[1];
} }
@@ -188213,7 +188307,7 @@ SQLITE_PRIVATE int sqlite3Fts3ReadBlock(
int nByte = sqlite3_blob_bytes(p->pSegments); int nByte = sqlite3_blob_bytes(p->pSegments);
*pnBlob = nByte; *pnBlob = nByte;
if( paBlob ){ if( paBlob ){
char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING); char *aByte = sqlite3_malloc64((i64)nByte + FTS3_NODE_PADDING);
if( !aByte ){ if( !aByte ){
rc = SQLITE_NOMEM; rc = SQLITE_NOMEM;
}else{ }else{
@@ -188330,7 +188424,7 @@ static int fts3SegReaderNext(
int nTerm = fts3HashKeysize(pElem); int nTerm = fts3HashKeysize(pElem);
if( (nTerm+1)>pReader->nTermAlloc ){ if( (nTerm+1)>pReader->nTermAlloc ){
sqlite3_free(pReader->zTerm); sqlite3_free(pReader->zTerm);
pReader->zTerm = (char*)sqlite3_malloc((nTerm+1)*2); pReader->zTerm = (char*)sqlite3_malloc64(((i64)nTerm+1)*2);
if( !pReader->zTerm ) return SQLITE_NOMEM; if( !pReader->zTerm ) return SQLITE_NOMEM;
pReader->nTermAlloc = (nTerm+1)*2; pReader->nTermAlloc = (nTerm+1)*2;
} }
@@ -188338,7 +188432,7 @@ static int fts3SegReaderNext(
pReader->zTerm[nTerm] = '\0'; pReader->zTerm[nTerm] = '\0';
pReader->nTerm = nTerm; pReader->nTerm = nTerm;
aCopy = (char*)sqlite3_malloc(nCopy); aCopy = (char*)sqlite3_malloc64(nCopy);
if( !aCopy ) return SQLITE_NOMEM; if( !aCopy ) return SQLITE_NOMEM;
memcpy(aCopy, pList->aData, nCopy); memcpy(aCopy, pList->aData, nCopy);
pReader->nNode = pReader->nDoclist = nCopy; pReader->nNode = pReader->nDoclist = nCopy;
@@ -188625,7 +188719,7 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(
nExtra = nRoot + FTS3_NODE_PADDING; nExtra = nRoot + FTS3_NODE_PADDING;
} }
pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra); pReader = (Fts3SegReader *)sqlite3_malloc64(sizeof(Fts3SegReader) + nExtra);
if( !pReader ){ if( !pReader ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -188717,7 +188811,7 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderPending(
if( nElem==nAlloc ){ if( nElem==nAlloc ){
Fts3HashElem **aElem2; Fts3HashElem **aElem2;
nAlloc += 16; nAlloc += 16;
aElem2 = (Fts3HashElem **)sqlite3_realloc( aElem2 = (Fts3HashElem **)sqlite3_realloc64(
aElem, nAlloc*sizeof(Fts3HashElem *) aElem, nAlloc*sizeof(Fts3HashElem *)
); );
if( !aElem2 ){ if( !aElem2 ){
@@ -189051,7 +189145,7 @@ static int fts3NodeAddTerm(
** this is not expected to be a serious problem. ** this is not expected to be a serious problem.
*/ */
assert( pTree->aData==(char *)&pTree[1] ); assert( pTree->aData==(char *)&pTree[1] );
pTree->aData = (char *)sqlite3_malloc(nReq); pTree->aData = (char *)sqlite3_malloc64(nReq);
if( !pTree->aData ){ if( !pTree->aData ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -189069,7 +189163,7 @@ static int fts3NodeAddTerm(
if( isCopyTerm ){ if( isCopyTerm ){
if( pTree->nMalloc<nTerm ){ if( pTree->nMalloc<nTerm ){
char *zNew = sqlite3_realloc(pTree->zMalloc, nTerm*2); char *zNew = sqlite3_realloc64(pTree->zMalloc, (i64)nTerm*2);
if( !zNew ){ if( !zNew ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -189095,7 +189189,7 @@ static int fts3NodeAddTerm(
** now. Instead, the term is inserted into the parent of pTree. If pTree ** now. Instead, the term is inserted into the parent of pTree. If pTree
** has no parent, one is created here. ** has no parent, one is created here.
*/ */
pNew = (SegmentNode *)sqlite3_malloc(sizeof(SegmentNode) + p->nNodeSize); pNew = (SegmentNode *)sqlite3_malloc64(sizeof(SegmentNode) + p->nNodeSize);
if( !pNew ){ if( !pNew ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -189233,7 +189327,7 @@ static int fts3SegWriterAdd(
){ ){
int nPrefix; /* Size of term prefix in bytes */ int nPrefix; /* Size of term prefix in bytes */
int nSuffix; /* Size of term suffix in bytes */ int nSuffix; /* Size of term suffix in bytes */
int nReq; /* Number of bytes required on leaf page */ i64 nReq; /* Number of bytes required on leaf page */
int nData; int nData;
SegmentWriter *pWriter = *ppWriter; SegmentWriter *pWriter = *ppWriter;
@@ -189242,13 +189336,13 @@ static int fts3SegWriterAdd(
sqlite3_stmt *pStmt; sqlite3_stmt *pStmt;
/* Allocate the SegmentWriter structure */ /* Allocate the SegmentWriter structure */
pWriter = (SegmentWriter *)sqlite3_malloc(sizeof(SegmentWriter)); pWriter = (SegmentWriter *)sqlite3_malloc64(sizeof(SegmentWriter));
if( !pWriter ) return SQLITE_NOMEM; if( !pWriter ) return SQLITE_NOMEM;
memset(pWriter, 0, sizeof(SegmentWriter)); memset(pWriter, 0, sizeof(SegmentWriter));
*ppWriter = pWriter; *ppWriter = pWriter;
/* Allocate a buffer in which to accumulate data */ /* Allocate a buffer in which to accumulate data */
pWriter->aData = (char *)sqlite3_malloc(p->nNodeSize); pWriter->aData = (char *)sqlite3_malloc64(p->nNodeSize);
if( !pWriter->aData ) return SQLITE_NOMEM; if( !pWriter->aData ) return SQLITE_NOMEM;
pWriter->nSize = p->nNodeSize; pWriter->nSize = p->nNodeSize;
@@ -189323,7 +189417,7 @@ static int fts3SegWriterAdd(
** the buffer to make it large enough. ** the buffer to make it large enough.
*/ */
if( nReq>pWriter->nSize ){ if( nReq>pWriter->nSize ){
char *aNew = sqlite3_realloc(pWriter->aData, nReq); char *aNew = sqlite3_realloc64(pWriter->aData, nReq);
if( !aNew ) return SQLITE_NOMEM; if( !aNew ) return SQLITE_NOMEM;
pWriter->aData = aNew; pWriter->aData = aNew;
pWriter->nSize = nReq; pWriter->nSize = nReq;
@@ -189348,7 +189442,7 @@ static int fts3SegWriterAdd(
*/ */
if( isCopyTerm ){ if( isCopyTerm ){
if( nTerm>pWriter->nMalloc ){ if( nTerm>pWriter->nMalloc ){
char *zNew = sqlite3_realloc(pWriter->zMalloc, nTerm*2); char *zNew = sqlite3_realloc64(pWriter->zMalloc, (i64)nTerm*2);
if( !zNew ){ if( !zNew ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -189656,12 +189750,12 @@ static void fts3ColumnFilter(
static int fts3MsrBufferData( static int fts3MsrBufferData(
Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */ Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */
char *pList, char *pList,
int nList i64 nList
){ ){
if( nList>pMsr->nBuffer ){ if( nList>pMsr->nBuffer ){
char *pNew; char *pNew;
pMsr->nBuffer = nList*2; pMsr->nBuffer = nList*2;
pNew = (char *)sqlite3_realloc(pMsr->aBuffer, pMsr->nBuffer); pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, pMsr->nBuffer);
if( !pNew ) return SQLITE_NOMEM; if( !pNew ) return SQLITE_NOMEM;
pMsr->aBuffer = pNew; pMsr->aBuffer = pNew;
} }
@@ -189717,7 +189811,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext(
fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp); fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp);
if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){ if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){
rc = fts3MsrBufferData(pMsr, pList, nList+1); rc = fts3MsrBufferData(pMsr, pList, (i64)nList+1);
if( rc!=SQLITE_OK ) return rc; if( rc!=SQLITE_OK ) return rc;
assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 ); assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 );
pList = pMsr->aBuffer; pList = pMsr->aBuffer;
@@ -189854,11 +189948,11 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr){
return SQLITE_OK; return SQLITE_OK;
} }
static int fts3GrowSegReaderBuffer(Fts3MultiSegReader *pCsr, int nReq){ static int fts3GrowSegReaderBuffer(Fts3MultiSegReader *pCsr, i64 nReq){
if( nReq>pCsr->nBuffer ){ if( nReq>pCsr->nBuffer ){
char *aNew; char *aNew;
pCsr->nBuffer = nReq*2; pCsr->nBuffer = nReq*2;
aNew = sqlite3_realloc(pCsr->aBuffer, pCsr->nBuffer); aNew = sqlite3_realloc64(pCsr->aBuffer, pCsr->nBuffer);
if( !aNew ){ if( !aNew ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -189949,7 +190043,8 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
){ ){
pCsr->nDoclist = apSegment[0]->nDoclist; pCsr->nDoclist = apSegment[0]->nDoclist;
if( fts3SegReaderIsPending(apSegment[0]) ){ if( fts3SegReaderIsPending(apSegment[0]) ){
rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, pCsr->nDoclist); rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist,
(i64)pCsr->nDoclist);
pCsr->aDoclist = pCsr->aBuffer; pCsr->aDoclist = pCsr->aBuffer;
}else{ }else{
pCsr->aDoclist = apSegment[0]->aDoclist; pCsr->aDoclist = apSegment[0]->aDoclist;
@@ -190002,7 +190097,8 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0); nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0);
rc = fts3GrowSegReaderBuffer(pCsr, nByte+nDoclist+FTS3_NODE_PADDING); rc = fts3GrowSegReaderBuffer(pCsr,
(i64)nByte+nDoclist+FTS3_NODE_PADDING);
if( rc ) return rc; if( rc ) return rc;
if( isFirst ){ if( isFirst ){
@@ -190028,7 +190124,7 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
fts3SegReaderSort(apSegment, nMerge, j, xCmp); fts3SegReaderSort(apSegment, nMerge, j, xCmp);
} }
if( nDoclist>0 ){ if( nDoclist>0 ){
rc = fts3GrowSegReaderBuffer(pCsr, nDoclist+FTS3_NODE_PADDING); rc = fts3GrowSegReaderBuffer(pCsr, (i64)nDoclist+FTS3_NODE_PADDING);
if( rc ) return rc; if( rc ) return rc;
memset(&pCsr->aBuffer[nDoclist], 0, FTS3_NODE_PADDING); memset(&pCsr->aBuffer[nDoclist], 0, FTS3_NODE_PADDING);
pCsr->aDoclist = pCsr->aBuffer; pCsr->aDoclist = pCsr->aBuffer;
@@ -190741,7 +190837,7 @@ struct NodeReader {
static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){ static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){
if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){ if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){
int nAlloc = nMin; int nAlloc = nMin;
char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc); char *a = (char *)sqlite3_realloc64(pBlob->a, nAlloc);
if( a ){ if( a ){
pBlob->nAlloc = nAlloc; pBlob->nAlloc = nAlloc;
pBlob->a = a; pBlob->a = a;
@@ -191538,7 +191634,7 @@ static int fts3RepackSegdirLevel(
if( nIdx>=nAlloc ){ if( nIdx>=nAlloc ){
int *aNew; int *aNew;
nAlloc += 16; nAlloc += 16;
aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int)); aNew = sqlite3_realloc64(aIdx, nAlloc*sizeof(int));
if( !aNew ){ if( !aNew ){
rc = SQLITE_NOMEM; rc = SQLITE_NOMEM;
break; break;
@@ -191912,7 +192008,7 @@ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
/* Allocate space for the cursor, filter and writer objects */ /* Allocate space for the cursor, filter and writer objects */
const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter); const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter);
pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc); pWriter = (IncrmergeWriter *)sqlite3_malloc64(nAlloc);
if( !pWriter ) return SQLITE_NOMEM; if( !pWriter ) return SQLITE_NOMEM;
pFilter = (Fts3SegFilter *)&pWriter[1]; pFilter = (Fts3SegFilter *)&pWriter[1];
pCsr = (Fts3MultiSegReader *)&pFilter[1]; pCsr = (Fts3MultiSegReader *)&pFilter[1];
@@ -192548,7 +192644,7 @@ SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(
return SQLITE_OK; return SQLITE_OK;
} }
pRet = (char *)sqlite3_malloc(p->pList->nData); pRet = (char *)sqlite3_malloc64(p->pList->nData);
if( !pRet ) return SQLITE_NOMEM; if( !pRet ) return SQLITE_NOMEM;
nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy); nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy);
@@ -192568,7 +192664,7 @@ SQLITE_PRIVATE int sqlite3Fts3DeferToken(
int iCol /* Column that token must appear in (or -1) */ int iCol /* Column that token must appear in (or -1) */
){ ){
Fts3DeferredToken *pDeferred; Fts3DeferredToken *pDeferred;
pDeferred = sqlite3_malloc(sizeof(*pDeferred)); pDeferred = sqlite3_malloc64(sizeof(*pDeferred));
if( !pDeferred ){ if( !pDeferred ){
return SQLITE_NOMEM; return SQLITE_NOMEM;
} }
@@ -204147,7 +204243,7 @@ static int geopolyUpdate(
sqlite3_free(p); sqlite3_free(p);
nChange = 1; nChange = 1;
} }
for(jj=1; jj<pRtree->nAux; jj++){ for(jj=1; jj<nData-2; jj++){
nChange++; nChange++;
sqlite3_bind_value(pUp, jj+2, aData[jj+2]); sqlite3_bind_value(pUp, jj+2, aData[jj+2]);
} }
@@ -204750,8 +204846,9 @@ static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){
if( U_SUCCESS(status) ){ if( U_SUCCESS(status) ){
sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete); sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete);
}else{ pExpr = sqlite3_get_auxdata(p, 0);
assert(!pExpr); }
if( !pExpr ){
icuFunctionError(p, "uregex_open", status); icuFunctionError(p, "uregex_open", status);
return; return;
} }
@@ -236637,7 +236734,7 @@ static void fts5SourceIdFunc(
){ ){
assert( nArg==0 ); assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused); UNUSED_PARAM2(nArg, apUnused);
sqlite3_result_text(pCtx, "fts5: 2022-07-21 15:24:47 698edb77537b67c41adc68f9b892db56bcf9a55e00371a61420f3ddd668e6603", -1, SQLITE_TRANSIENT); sqlite3_result_text(pCtx, "fts5: 2022-09-29 15:55:41 a29f9949895322123f7c38fbe94c649a9d6e6c9cd0c3b41c96d694552f26b309", -1, SQLITE_TRANSIENT);
} }
/* /*

View File

@@ -147,9 +147,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()]. ** [sqlite_version()] and [sqlite_source_id()].
*/ */
#define SQLITE_VERSION "3.39.2" #define SQLITE_VERSION "3.39.4"
#define SQLITE_VERSION_NUMBER 3039002 #define SQLITE_VERSION_NUMBER 3039004
#define SQLITE_SOURCE_ID "2022-07-21 15:24:47 698edb77537b67c41adc68f9b892db56bcf9a55e00371a61420f3ddd668e6603" #define SQLITE_SOURCE_ID "2022-09-29 15:55:41 a29f9949895322123f7c38fbe94c649a9d6e6c9cd0c3b41c96d694552f26b309"
/* /*
** CAPI3REF: Run-Time Library Version Numbers ** CAPI3REF: Run-Time Library Version Numbers

View File

@@ -494,10 +494,12 @@ func (ai *aggInfo) Done(ctx *C.sqlite3_context) {
// Commit transaction. // Commit transaction.
func (tx *SQLiteTx) Commit() error { func (tx *SQLiteTx) Commit() error {
_, err := tx.c.exec(context.Background(), "COMMIT", nil) _, err := tx.c.exec(context.Background(), "COMMIT", nil)
if err != nil && err.(Error).Code == C.SQLITE_BUSY { if err != nil {
// sqlite3 will leave the transaction open in this scenario. // sqlite3 may leave the transaction open in this scenario.
// However, database/sql considers the transaction complete once we // However, database/sql considers the transaction complete once we
// return from Commit() - we must clean up to honour its semantics. // return from Commit() - we must clean up to honour its semantics.
// We don't know if the ROLLBACK is strictly necessary, but according
// to sqlite's docs, there is no harm in calling ROLLBACK unnecessarily.
tx.c.exec(context.Background(), "ROLLBACK", nil) tx.c.exec(context.Background(), "ROLLBACK", nil)
} }
return err return err

View File

@@ -0,0 +1,14 @@
// Copyright (C) 2022 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build sqlite_math_functions
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_MATH_FUNCTIONS
#cgo LDFLAGS: -lm
*/
import "C"

View File

@@ -0,0 +1,15 @@
// Copyright (C) 2022 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build sqlite_os_trace
// +build sqlite_os_trace
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_FORCE_OS_TRACE=1
#cgo CFLAGS: -DSQLITE_DEBUG_OS_TRACE=1
*/
import "C"

View File

@@ -12,7 +12,6 @@ package sqlite3
#cgo CFLAGS: -fno-stack-check #cgo CFLAGS: -fno-stack-check
#cgo CFLAGS: -fno-stack-protector #cgo CFLAGS: -fno-stack-protector
#cgo CFLAGS: -mno-stack-arg-probe #cgo CFLAGS: -mno-stack-arg-probe
#cgo LDFLAGS: -lmingwex -lmingw32
#cgo windows,386 CFLAGS: -D_USE_32BIT_TIME_T #cgo windows,386 CFLAGS: -D_USE_32BIT_TIME_T
*/ */
import "C" import "C"

View File

@@ -445,6 +445,7 @@ Extensions
- [goldmark-pikchr](https://github.com/jchenry/goldmark-pikchr): Adds support for rendering [Pikchr](https://pikchr.org/home/doc/trunk/homepage.md) diagrams in goldmark documents. - [goldmark-pikchr](https://github.com/jchenry/goldmark-pikchr): Adds support for rendering [Pikchr](https://pikchr.org/home/doc/trunk/homepage.md) diagrams in goldmark documents.
- [goldmark-embed](https://github.com/13rac1/goldmark-embed): Adds support for rendering embeds from YouTube links. - [goldmark-embed](https://github.com/13rac1/goldmark-embed): Adds support for rendering embeds from YouTube links.
- [goldmark-latex](https://github.com/soypat/goldmark-latex): A $\LaTeX$ renderer that can be passed to `goldmark.WithRenderer()`. - [goldmark-latex](https://github.com/soypat/goldmark-latex): A $\LaTeX$ renderer that can be passed to `goldmark.WithRenderer()`.
- [goldmark-fences](https://github.com/stefanfritsch/goldmark-fences): Support for pandoc-style [fenced divs](https://pandoc.org/MANUAL.html#divs-and-spans) in goldmark.
goldmark internal(for extension developers) goldmark internal(for extension developers)

View File

@@ -122,6 +122,9 @@ func WithTableCellAlignMethod(a TableCellAlignMethod) TableOption {
} }
func isTableDelim(bs []byte) bool { func isTableDelim(bs []byte) bool {
if w, _ := util.IndentWidth(bs, 0); w > 3 {
return false
}
for _, b := range bs { for _, b := range bs {
if !(util.IsSpace(b) || b == '-' || b == '|' || b == ':') { if !(util.IsSpace(b) || b == '-' || b == '|' || b == ':') {
return false return false
@@ -243,6 +246,7 @@ func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []
} }
func (b *tableParagraphTransformer) parseDelimiter(segment text.Segment, reader text.Reader) []ast.Alignment { func (b *tableParagraphTransformer) parseDelimiter(segment text.Segment, reader text.Reader) []ast.Alignment {
line := segment.Value(reader.Source()) line := segment.Value(reader.Source())
if !isTableDelim(line) { if !isTableDelim(line) {
return nil return nil

View File

@@ -3,6 +3,7 @@ package parser
import ( import (
"github.com/yuin/goldmark/ast" "github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/text" "github.com/yuin/goldmark/text"
"github.com/yuin/goldmark/util"
) )
type paragraphParser struct { type paragraphParser struct {
@@ -33,9 +34,8 @@ func (b *paragraphParser) Open(parent ast.Node, reader text.Reader, pc Context)
} }
func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context) State { func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
_, segment := reader.PeekLine() line, segment := reader.PeekLine()
segment = segment.TrimLeftSpace(reader.Source()) if util.IsBlank(line) {
if segment.IsEmpty() {
return Close return Close
} }
node.Lines().Append(segment) node.Lines().Append(segment)
@@ -44,13 +44,14 @@ func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context
} }
func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) { func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) {
parent := node.Parent()
if parent == nil {
// paragraph has been transformed
return
}
lines := node.Lines() lines := node.Lines()
if lines.Len() != 0 { if lines.Len() != 0 {
// trim leading spaces
for i := 0; i < lines.Len(); i++ {
l := lines.At(i)
lines.Set(i, l.TrimLeftSpace(reader.Source()))
}
// trim trailing spaces // trim trailing spaces
length := lines.Len() length := lines.Len()
lastLine := node.Lines().At(length - 1) lastLine := node.Lines().At(length - 1)

View File

@@ -899,11 +899,13 @@ func (p *parser) closeBlocks(from, to int, reader text.Reader, pc Context) {
blocks := pc.OpenedBlocks() blocks := pc.OpenedBlocks()
for i := from; i >= to; i-- { for i := from; i >= to; i-- {
node := blocks[i].Node node := blocks[i].Node
blocks[i].Parser.Close(blocks[i].Node, reader, pc)
paragraph, ok := node.(*ast.Paragraph) paragraph, ok := node.(*ast.Paragraph)
if ok && node.Parent() != nil { if ok && node.Parent() != nil {
p.transformParagraph(paragraph, reader, pc) p.transformParagraph(paragraph, reader, pc)
} }
if node.Parent() != nil { // closes only if node has not been transformed
blocks[i].Parser.Close(blocks[i].Node, reader, pc)
}
} }
if from == len(blocks)-1 { if from == len(blocks)-1 {
blocks = blocks[0:to] blocks = blocks[0:to]

View File

@@ -10,18 +10,17 @@ import (
func (l *Linkpearl) GetAccountData(name string) (map[string]string, error) { func (l *Linkpearl) GetAccountData(name string) (map[string]string, error) {
cached, ok := l.acc.Get(name) cached, ok := l.acc.Get(name)
if ok { if ok {
l.log.Debug("GetAccountData(%s) from cache (data): %+v", name, cached) l.logAccountData(l.log.Debug, "GetAccountData(%q) cached:", cached, name)
if cached == nil { if cached == nil {
return map[string]string{}, nil return map[string]string{}, nil
} }
return cached, nil return cached, nil
} }
l.log.Debug("GetAccountData(%s) from API", name)
var data map[string]string var data map[string]string
err := l.GetClient().GetAccountData(name, &data) err := l.GetClient().GetAccountData(name, &data)
if err != nil { if err != nil {
l.log.Debug("GetAccountData(%s) from API (error): %v", name, err) l.logAccountData(l.log.Debug, "GetAccountData(%q) error: %v", nil, name, err)
data = map[string]string{} data = map[string]string{}
if strings.Contains(err.Error(), "M_NOT_FOUND") { if strings.Contains(err.Error(), "M_NOT_FOUND") {
l.acc.Add(name, data) l.acc.Add(name, data)
@@ -30,7 +29,7 @@ func (l *Linkpearl) GetAccountData(name string) (map[string]string, error) {
return data, err return data, err
} }
data = l.decryptAccountData(data) data = l.decryptAccountData(data)
l.log.Debug("GetAccountData(%s) from API (data): %+v", name, data) l.logAccountData(l.log.Debug, "GetAccountData(%q):", data, name)
l.acc.Add(name, data) l.acc.Add(name, data)
return data, err return data, err
@@ -40,7 +39,7 @@ func (l *Linkpearl) GetAccountData(name string) (map[string]string, error) {
func (l *Linkpearl) SetAccountData(name string, data map[string]string) error { func (l *Linkpearl) SetAccountData(name string, data map[string]string) error {
l.acc.Add(name, data) l.acc.Add(name, data)
l.log.Debug("SetAccountData(%s) to API (data): %+v", name, data) l.logAccountData(l.log.Debug, "SetAccountData(%q):", data, name)
data = l.encryptAccountData(data) data = l.encryptAccountData(data)
return l.GetClient().SetAccountData(name, data) return l.GetClient().SetAccountData(name, data)
} }
@@ -50,18 +49,17 @@ func (l *Linkpearl) GetRoomAccountData(roomID id.RoomID, name string) (map[strin
key := roomID.String() + name key := roomID.String() + name
cached, ok := l.acc.Get(key) cached, ok := l.acc.Get(key)
if ok { if ok {
l.log.Debug("GetRoomAccountData(%s, %s) from cache (data): %+v", roomID, name, cached) l.logAccountData(l.log.Debug, "GetRoomAccountData(%q, %q) cached:", cached, roomID, name)
if cached == nil { if cached == nil {
return map[string]string{}, nil return map[string]string{}, nil
} }
return cached, nil return cached, nil
} }
l.log.Debug("GetRoomAccountData(%s, %s) from API", roomID, name)
var data map[string]string var data map[string]string
err := l.GetClient().GetRoomAccountData(roomID, name, &data) err := l.GetClient().GetRoomAccountData(roomID, name, &data)
if err != nil { if err != nil {
l.log.Debug("GetRoomAccountData(%s, %s) from API (error): %v", roomID, name, err) l.logAccountData(l.log.Debug, "GetRoomAccountData(%q, %q) error: %v", nil, roomID, name, err)
data = map[string]string{} data = map[string]string{}
if strings.Contains(err.Error(), "M_NOT_FOUND") { if strings.Contains(err.Error(), "M_NOT_FOUND") {
l.acc.Add(key, data) l.acc.Add(key, data)
@@ -70,7 +68,7 @@ func (l *Linkpearl) GetRoomAccountData(roomID id.RoomID, name string) (map[strin
return data, err return data, err
} }
data = l.decryptAccountData(data) data = l.decryptAccountData(data)
l.log.Debug("GetRoomAccountData(%s,%s) from API (data): %+v", roomID, name, data) l.logAccountData(l.log.Debug, "GetRoomAccountData(%q, %q):", data, roomID, name)
l.acc.Add(key, data) l.acc.Add(key, data)
return data, err return data, err
@@ -81,7 +79,7 @@ func (l *Linkpearl) SetRoomAccountData(roomID id.RoomID, name string, data map[s
key := roomID.String() + name key := roomID.String() + name
l.acc.Add(key, data) l.acc.Add(key, data)
l.log.Debug("SetRoomAccountData(%s, %s) to API (data): %+v", roomID, name, data) l.logAccountData(l.log.Debug, "SetRoomAccountData(%q, %q):", data, roomID, name)
data = l.encryptAccountData(data) data = l.encryptAccountData(data)
return l.GetClient().SetRoomAccountData(roomID, name, data) return l.GetClient().SetRoomAccountData(roomID, name, data)
} }
@@ -95,11 +93,11 @@ func (l *Linkpearl) encryptAccountData(data map[string]string) map[string]string
for k, v := range data { for k, v := range data {
ek, err := l.acr.Encrypt(k) ek, err := l.acr.Encrypt(k)
if err != nil { if err != nil {
l.log.Error("cannot encrypt account data (key=%s): %v", k, err) l.log.Error("cannot encrypt account data (key=%q): %v", k, err)
} }
ev, err := l.acr.Encrypt(v) ev, err := l.acr.Encrypt(v)
if err != nil { if err != nil {
l.log.Error("cannot encrypt account data (key=%s): %v", k, err) l.log.Error("cannot encrypt account data (key=%q): %v", k, err)
} }
encrypted[ek] = ev // worst case: plaintext value encrypted[ek] = ev // worst case: plaintext value
} }
@@ -116,14 +114,35 @@ func (l *Linkpearl) decryptAccountData(data map[string]string) map[string]string
for ek, ev := range data { for ek, ev := range data {
k, err := l.acr.Decrypt(ek) k, err := l.acr.Decrypt(ek)
if err != nil { if err != nil {
l.log.Error("cannot decrypt account data (key=%s): %v", k, err) l.log.Error("cannot decrypt account data (key=%q): %v", k, err)
} }
v, err := l.acr.Decrypt(ev) v, err := l.acr.Decrypt(ev)
if err != nil { if err != nil {
l.log.Error("cannot decrypt account data (key=%s): %v", k, err) l.log.Error("cannot decrypt account data (key=%q): %v", k, err)
} }
decrypted[k] = v // worst case: encrypted value, usual case: migration from plaintext to encrypted account data decrypted[k] = v // worst case: encrypted value, usual case: migration from plaintext to encrypted account data
} }
return decrypted return decrypted
} }
func (l *Linkpearl) logAccountData(method func(string, ...any), message string, data map[string]string, args ...any) {
if len(data) == 0 {
method(message, args...)
return
}
safeData := make(map[string]string, len(data))
for k, v := range data {
sv, ok := l.aclr[k]
if ok {
safeData[k] = sv
continue
}
safeData[k] = v
}
args = append(args, safeData)
method(message+" %+v", args...)
}

View File

@@ -32,6 +32,11 @@ type Config struct {
// AccountDataSecret (Password) for encryption // AccountDataSecret (Password) for encryption
AccountDataSecret string AccountDataSecret string
// AccountDataLogReplace contains map of field name => value
// that will be used to replace mentioned account data fields with provided values
// when printing in logs (DEBUG, TRACE)
AccountDataLogReplace map[string]string
// MaxRetries for operations like auto join // MaxRetries for operations like auto join
MaxRetries int MaxRetries int

View File

@@ -25,6 +25,7 @@ type Linkpearl struct {
db *sql.DB db *sql.DB
acc *lru.Cache[string, map[string]string] acc *lru.Cache[string, map[string]string]
acr *Crypter acr *Crypter
aclr map[string]string
log config.Logger log config.Logger
api *mautrix.Client api *mautrix.Client
olm *crypto.OlmMachine olm *crypto.OlmMachine
@@ -41,6 +42,9 @@ type ReqPresence struct {
} }
func setDefaults(cfg *config.Config) { func setDefaults(cfg *config.Config) {
if cfg.AccountDataLogReplace == nil {
cfg.AccountDataLogReplace = make(map[string]string)
}
if cfg.MaxRetries == 0 { if cfg.MaxRetries == 0 {
cfg.MaxRetries = DefaultMaxRetries cfg.MaxRetries = DefaultMaxRetries
} }
@@ -80,6 +84,7 @@ func New(cfg *config.Config) (*Linkpearl, error) {
db: cfg.DB, db: cfg.DB,
acc: acc, acc: acc,
acr: acr, acr: acr,
aclr: cfg.AccountDataLogReplace,
api: api, api: api,
log: cfg.LPLogger, log: cfg.LPLogger,
joinPermit: cfg.JoinPermit, joinPermit: cfg.JoinPermit,

View File

@@ -10,10 +10,10 @@ import (
// Send a message to the roomID and automatically try to encrypt it, if the destination room is encrypted // Send a message to the roomID and automatically try to encrypt it, if the destination room is encrypted
func (l *Linkpearl) Send(roomID id.RoomID, content interface{}) (id.EventID, error) { func (l *Linkpearl) Send(roomID id.RoomID, content interface{}) (id.EventID, error) {
if !l.store.IsEncrypted(roomID) { if !l.store.IsEncrypted(roomID) {
l.log.Debug("room %s is not encrypted", roomID) l.log.Debug("room %q is not encrypted", roomID)
return l.SendPlaintext(roomID, content) return l.SendPlaintext(roomID, content)
} }
l.log.Debug("room %s is encrypted", roomID) l.log.Debug("room %q is encrypted", roomID)
encrypted, err := l.EncryptEvent(roomID, content) encrypted, err := l.EncryptEvent(roomID, content)
if err != nil { if err != nil {
@@ -28,7 +28,7 @@ func (l *Linkpearl) Send(roomID id.RoomID, content interface{}) (id.EventID, err
func (l *Linkpearl) SendFile(roomID id.RoomID, req *mautrix.ReqUploadMedia, msgtype event.MessageType, relation *event.RelatesTo) error { func (l *Linkpearl) SendFile(roomID id.RoomID, req *mautrix.ReqUploadMedia, msgtype event.MessageType, relation *event.RelatesTo) error {
resp, err := l.GetClient().UploadMedia(*req) resp, err := l.GetClient().UploadMedia(*req)
if err != nil { if err != nil {
l.log.Error("cannot upload file %s: %v", req.FileName, err) l.log.Error("cannot upload file %q: %v", req.FileName, err)
return err return err
} }
_, err = l.Send(roomID, &event.Content{ _, err = l.Send(roomID, &event.Content{
@@ -40,7 +40,7 @@ func (l *Linkpearl) SendFile(roomID id.RoomID, req *mautrix.ReqUploadMedia, msgt
}, },
}) })
if err != nil { if err != nil {
l.log.Error("cannot send uploaded file: %s: %v", req.FileName, err) l.log.Error("cannot send uploaded file: %q: %v", req.FileName, err)
} }
return err return err
@@ -48,7 +48,7 @@ func (l *Linkpearl) SendFile(roomID id.RoomID, req *mautrix.ReqUploadMedia, msgt
// SendPlaintext sends plaintext event only // SendPlaintext sends plaintext event only
func (l *Linkpearl) SendPlaintext(roomID id.RoomID, content interface{}) (id.EventID, error) { func (l *Linkpearl) SendPlaintext(roomID id.RoomID, content interface{}) (id.EventID, error) {
l.log.Debug("sending plaintext event to %s: %+v", roomID, content) l.log.Debug("sending plaintext event to %q: %+v", roomID, content)
resp, err := l.api.SendMessageEvent(roomID, event.EventMessage, content) resp, err := l.api.SendMessageEvent(roomID, event.EventMessage, content)
if err != nil { if err != nil {
return "", err return "", err
@@ -58,7 +58,7 @@ func (l *Linkpearl) SendPlaintext(roomID id.RoomID, content interface{}) (id.Eve
// SendEncrypted sends encrypted event only // SendEncrypted sends encrypted event only
func (l *Linkpearl) SendEncrypted(roomID id.RoomID, content interface{}) (id.EventID, error) { func (l *Linkpearl) SendEncrypted(roomID id.RoomID, content interface{}) (id.EventID, error) {
l.log.Debug("sending encrypted event to %s: %+v", roomID, content) l.log.Debug("sending encrypted event to %q: %+v", roomID, content)
resp, err := l.api.SendMessageEvent(roomID, event.EventEncrypted, content) resp, err := l.api.SendMessageEvent(roomID, event.EventEncrypted, content)
if err != nil { if err != nil {
return "", err return "", err

View File

@@ -42,43 +42,43 @@ func (s *Store) GetAccount() (*crypto.OlmAccount, error) {
// HasSession returns whether there is an Olm session for the given sender key. // HasSession returns whether there is an Olm session for the given sender key.
func (s *Store) HasSession(key id.SenderKey) bool { func (s *Store) HasSession(key id.SenderKey) bool {
s.log.Debug("check if olm session exists for the key %s", key) s.log.Debug("check if olm session exists for the key %q", key)
return s.s.HasSession(key) return s.s.HasSession(key)
} }
// GetSessions returns all the known Olm sessions for a sender key. // GetSessions returns all the known Olm sessions for a sender key.
func (s *Store) GetSessions(key id.SenderKey) (crypto.OlmSessionList, error) { func (s *Store) GetSessions(key id.SenderKey) (crypto.OlmSessionList, error) {
s.log.Debug("loading olm session for the key %s", key) s.log.Debug("loading olm session for the key %q", key)
return s.s.GetSessions(key) return s.s.GetSessions(key)
} }
// GetLatestSession retrieves the Olm session for a given sender key from the database that has the largest ID. // GetLatestSession retrieves the Olm session for a given sender key from the database that has the largest ID.
func (s *Store) GetLatestSession(key id.SenderKey) (*crypto.OlmSession, error) { func (s *Store) GetLatestSession(key id.SenderKey) (*crypto.OlmSession, error) {
s.log.Debug("loading latest session for the key %s", key) s.log.Debug("loading latest session for the key %q", key)
return s.s.GetLatestSession(key) return s.s.GetLatestSession(key)
} }
// AddSession persists an Olm session for a sender in the database. // AddSession persists an Olm session for a sender in the database.
func (s *Store) AddSession(key id.SenderKey, session *crypto.OlmSession) error { func (s *Store) AddSession(key id.SenderKey, session *crypto.OlmSession) error {
s.log.Debug("adding new olm session for the key %s", key) s.log.Debug("adding new olm session for the key %q", key)
return s.s.AddSession(key, session) return s.s.AddSession(key, session)
} }
// UpdateSession replaces the Olm session for a sender in the database. // UpdateSession replaces the Olm session for a sender in the database.
func (s *Store) UpdateSession(key id.SenderKey, session *crypto.OlmSession) error { func (s *Store) UpdateSession(key id.SenderKey, session *crypto.OlmSession) error {
s.log.Debug("update olm session for the key %s", key) s.log.Debug("update olm session for the key %q", key)
return s.s.UpdateSession(key, session) return s.s.UpdateSession(key, session)
} }
// PutGroupSession stores an inbound Megolm group session for a room, sender and session. // PutGroupSession stores an inbound Megolm group session for a room, sender and session.
func (s *Store) PutGroupSession(roomID id.RoomID, senderKey id.SenderKey, sessionID id.SessionID, session *crypto.InboundGroupSession) error { func (s *Store) PutGroupSession(roomID id.RoomID, senderKey id.SenderKey, sessionID id.SessionID, session *crypto.InboundGroupSession) error {
s.log.Debug("storing inbound group session for the room %s", roomID) s.log.Debug("storing inbound group session for the room %q", roomID)
return s.s.PutGroupSession(roomID, senderKey, sessionID, session) return s.s.PutGroupSession(roomID, senderKey, sessionID, session)
} }
// GetGroupSession retrieves an inbound Megolm group session for a room, sender and session. // GetGroupSession retrieves an inbound Megolm group session for a room, sender and session.
func (s *Store) GetGroupSession(roomID id.RoomID, senderKey id.SenderKey, sessionID id.SessionID) (*crypto.InboundGroupSession, error) { func (s *Store) GetGroupSession(roomID id.RoomID, senderKey id.SenderKey, sessionID id.SessionID) (*crypto.InboundGroupSession, error) {
s.log.Debug("loading inbound group session for the room %s", roomID) s.log.Debug("loading inbound group session for the room %q", roomID)
return s.s.GetGroupSession(roomID, senderKey, sessionID) return s.s.GetGroupSession(roomID, senderKey, sessionID)
} }
@@ -98,7 +98,7 @@ func (s *Store) GetWithheldGroupSession(roomID id.RoomID, senderKey id.SenderKey
// GetGroupSessionsForRoom gets all the inbound Megolm sessions for a specific room. This is used for creating key // GetGroupSessionsForRoom gets all the inbound Megolm sessions for a specific room. This is used for creating key
// export files. Unlike GetGroupSession, this should not return any errors about withheld keys. // export files. Unlike GetGroupSession, this should not return any errors about withheld keys.
func (s *Store) GetGroupSessionsForRoom(roomID id.RoomID) ([]*crypto.InboundGroupSession, error) { func (s *Store) GetGroupSessionsForRoom(roomID id.RoomID) ([]*crypto.InboundGroupSession, error) {
s.log.Debug("loading group session for the room %s", roomID) s.log.Debug("loading group session for the room %q", roomID)
return s.s.GetGroupSessionsForRoom(roomID) return s.s.GetGroupSessionsForRoom(roomID)
} }
@@ -143,31 +143,31 @@ func (s *Store) ValidateMessageIndex(senderKey id.SenderKey, sessionID id.Sessio
// GetDevices returns a map of device IDs to device identities, including the identity and signing keys, for a given user ID. // GetDevices returns a map of device IDs to device identities, including the identity and signing keys, for a given user ID.
func (s *Store) GetDevices(userID id.UserID) (map[id.DeviceID]*id.Device, error) { func (s *Store) GetDevices(userID id.UserID) (map[id.DeviceID]*id.Device, error) {
s.log.Debug("loading devices of the %s", userID) s.log.Debug("loading devices of the %q", userID)
return s.s.GetDevices(userID) return s.s.GetDevices(userID)
} }
// GetDevice returns the device dentity for a given user and device ID. // GetDevice returns the device dentity for a given user and device ID.
func (s *Store) GetDevice(userID id.UserID, deviceID id.DeviceID) (*id.Device, error) { func (s *Store) GetDevice(userID id.UserID, deviceID id.DeviceID) (*id.Device, error) {
s.log.Debug("loading device %s for the %s", deviceID, userID) s.log.Debug("loading device %q for the %q", deviceID, userID)
return s.s.GetDevice(userID, deviceID) return s.s.GetDevice(userID, deviceID)
} }
// FindDeviceByKey finds a specific device by its sender key. // FindDeviceByKey finds a specific device by its sender key.
func (s *Store) FindDeviceByKey(userID id.UserID, identityKey id.IdentityKey) (*id.Device, error) { func (s *Store) FindDeviceByKey(userID id.UserID, identityKey id.IdentityKey) (*id.Device, error) {
s.log.Debug("loading device of the %s by the key %s", userID, identityKey) s.log.Debug("loading device of the %q by the key %q", userID, identityKey)
return s.s.FindDeviceByKey(userID, identityKey) return s.s.FindDeviceByKey(userID, identityKey)
} }
// PutDevice stores a single device for a user, replacing it if it exists already. // PutDevice stores a single device for a user, replacing it if it exists already.
func (s *Store) PutDevice(userID id.UserID, device *id.Device) error { func (s *Store) PutDevice(userID id.UserID, device *id.Device) error {
s.log.Debug("storing device of the %s", userID) s.log.Debug("storing device of the %q", userID)
return s.s.PutDevice(userID, device) return s.s.PutDevice(userID, device)
} }
// PutDevices stores the device identity information for the given user ID. // PutDevices stores the device identity information for the given user ID.
func (s *Store) PutDevices(userID id.UserID, devices map[id.DeviceID]*id.Device) error { func (s *Store) PutDevices(userID id.UserID, devices map[id.DeviceID]*id.Device) error {
s.log.Debug("storing devices of the %s", userID) s.log.Debug("storing devices of the %q", userID)
return s.s.PutDevices(userID, devices) return s.s.PutDevices(userID, devices)
} }
@@ -179,13 +179,13 @@ func (s *Store) FilterTrackedUsers(users []id.UserID) ([]id.UserID, error) {
// PutCrossSigningKey stores a cross-signing key of some user along with its usage. // PutCrossSigningKey stores a cross-signing key of some user along with its usage.
func (s *Store) PutCrossSigningKey(userID id.UserID, usage id.CrossSigningUsage, key id.Ed25519) error { func (s *Store) PutCrossSigningKey(userID id.UserID, usage id.CrossSigningUsage, key id.Ed25519) error {
s.log.Debug("storing crosssigning key of the %s", userID) s.log.Debug("storing crosssigning key of the %q", userID)
return s.s.PutCrossSigningKey(userID, usage, key) return s.s.PutCrossSigningKey(userID, usage, key)
} }
// GetCrossSigningKeys retrieves a user's stored cross-signing keys. // GetCrossSigningKeys retrieves a user's stored cross-signing keys.
func (s *Store) GetCrossSigningKeys(userID id.UserID) (map[id.CrossSigningUsage]id.CrossSigningKey, error) { func (s *Store) GetCrossSigningKeys(userID id.UserID) (map[id.CrossSigningUsage]id.CrossSigningKey, error) {
s.log.Debug("loading crosssigning keys of the %s", userID) s.log.Debug("loading crosssigning keys of the %q", userID)
return s.s.GetCrossSigningKeys(userID) return s.s.GetCrossSigningKeys(userID)
} }
@@ -209,6 +209,6 @@ func (s *Store) IsKeySignedBy(userID id.UserID, key id.Ed25519, signerID id.User
// DropSignaturesByKey deletes the signatures made by the given user and key from the store. It returns the number of signatures deleted. // DropSignaturesByKey deletes the signatures made by the given user and key from the store. It returns the number of signatures deleted.
func (s *Store) DropSignaturesByKey(userID id.UserID, key id.Ed25519) (int64, error) { func (s *Store) DropSignaturesByKey(userID id.UserID, key id.Ed25519) (int64, error) {
s.log.Debug("removing signatures by the %s/%s", userID, key) s.log.Debug("removing signatures by the %q/%q", userID, key)
return s.s.DropSignaturesByKey(userID, key) return s.s.DropSignaturesByKey(userID, key)
} }

View File

@@ -20,7 +20,7 @@ func (s *Store) IsEncrypted(roomID id.RoomID) bool {
return false return false
} }
s.log.Debug("checking if room %s is encrypted", roomID) s.log.Debug("checking if room %q is encrypted", roomID)
return s.GetEncryptionEvent(roomID) != nil return s.GetEncryptionEvent(roomID) != nil
} }
@@ -79,7 +79,7 @@ func (s *Store) SetEncryptionEvent(evt *event.Event) {
// SetMembership saves room members // SetMembership saves room members
func (s *Store) SetMembership(evt *event.Event) { func (s *Store) SetMembership(evt *event.Event) {
s.log.Debug("saving membership event for %s", evt.RoomID) s.log.Debug("saving membership event for %q", evt.RoomID)
tx, err := s.db.Begin() tx, err := s.db.Begin()
if err != nil { if err != nil {
s.log.Error("cannot begin transaction: %v", err) s.log.Error("cannot begin transaction: %v", err)
@@ -127,7 +127,7 @@ func (s *Store) SetMembership(evt *event.Event) {
// GetRoomMembers ... // GetRoomMembers ...
func (s *Store) GetRoomMembers(roomID id.RoomID) []id.UserID { func (s *Store) GetRoomMembers(roomID id.RoomID) []id.UserID {
s.log.Debug("loading room members of %s", roomID) s.log.Debug("loading room members of %q", roomID)
query := "SELECT user_id FROM room_members WHERE room_id = $1" query := "SELECT user_id FROM room_members WHERE room_id = $1"
rows, err := s.db.Query(query, roomID) rows, err := s.db.Query(query, roomID)
users := make([]id.UserID, 0) users := make([]id.UserID, 0)
@@ -148,7 +148,7 @@ func (s *Store) GetRoomMembers(roomID id.RoomID) []id.UserID {
// SaveSession to DB // SaveSession to DB
func (s *Store) SaveSession(userID id.UserID, deviceID id.DeviceID, accessToken string) { func (s *Store) SaveSession(userID id.UserID, deviceID id.DeviceID, accessToken string) {
s.log.Debug("saving session credentials of %s/%s", userID, deviceID) s.log.Debug("saving session credentials of %q/%q", userID, deviceID)
tx, err := s.db.Begin() tx, err := s.db.Begin()
if err != nil { if err != nil {
s.log.Error("cannot begin transaction: %v", err) s.log.Error("cannot begin transaction: %v", err)

View File

@@ -16,7 +16,7 @@ func (s *Store) GetEncryptionEvent(roomID id.RoomID) *event.EncryptionEventConte
if !s.encryption { if !s.encryption {
return nil return nil
} }
s.log.Debug("finding encryption event of %s", roomID) s.log.Debug("finding encryption event of %q", roomID)
query := "SELECT encryption_event FROM rooms WHERE room_id = $1" query := "SELECT encryption_event FROM rooms WHERE room_id = $1"
row := s.db.QueryRow(query, roomID) row := s.db.QueryRow(query, roomID)
@@ -28,7 +28,7 @@ func (s *Store) GetEncryptionEvent(roomID id.RoomID) *event.EncryptionEventConte
} }
var encryptionEvent event.EncryptionEventContent var encryptionEvent event.EncryptionEventContent
if err := json.Unmarshal(encryptionEventJSON, &encryptionEvent); err != nil { if err := json.Unmarshal(encryptionEventJSON, &encryptionEvent); err != nil {
s.log.Debug("cannot unmarshal encryption event: %s", err) s.log.Debug("cannot unmarshal encryption event: %q", err)
return nil return nil
} }
@@ -40,12 +40,12 @@ func (s *Store) FindSharedRooms(userID id.UserID) []id.RoomID {
if !s.encryption { if !s.encryption {
return nil return nil
} }
s.log.Debug("loading shared rooms for %s", userID) s.log.Debug("loading shared rooms for %q", userID)
query := "SELECT room_id FROM room_members WHERE user_id = $1" query := "SELECT room_id FROM room_members WHERE user_id = $1"
rows, queryErr := s.db.Query(query, userID) rows, queryErr := s.db.Query(query, userID)
rooms := make([]id.RoomID, 0) rooms := make([]id.RoomID, 0)
if queryErr != nil { if queryErr != nil {
s.log.Error("cannot load room members: %s", queryErr) s.log.Error("cannot load room members: %q", queryErr)
return rooms return rooms
} }
defer rows.Close() defer rows.Close()

View File

@@ -10,7 +10,7 @@ import (
// SaveFilterID to DB // SaveFilterID to DB
func (s *Store) SaveFilterID(userID id.UserID, filterID string) { func (s *Store) SaveFilterID(userID id.UserID, filterID string) {
s.log.Debug("saving filter ID %s for %s", filterID, userID) s.log.Debug("saving filter ID %q for %q", filterID, userID)
tx, err := s.db.Begin() tx, err := s.db.Begin()
if err != nil { if err != nil {
s.log.Error("cannot begin transaction: %v", err) s.log.Error("cannot begin transaction: %v", err)
@@ -52,12 +52,12 @@ func (s *Store) SaveFilterID(userID id.UserID, filterID string) {
// LoadFilterID from DB // LoadFilterID from DB
func (s *Store) LoadFilterID(userID id.UserID) string { func (s *Store) LoadFilterID(userID id.UserID) string {
s.log.Debug("loading filter ID for %s", userID) s.log.Debug("loading filter ID for %q", userID)
query := "SELECT filter_id FROM user_filter_ids WHERE user_id = $1" query := "SELECT filter_id FROM user_filter_ids WHERE user_id = $1"
row := s.db.QueryRow(query, userID) row := s.db.QueryRow(query, userID)
var filterID string var filterID string
if err := row.Scan(&filterID); err != nil { if err := row.Scan(&filterID); err != nil {
s.log.Error("cannot load filter ID: %s", err) s.log.Error("cannot load filter ID: %q", err)
return "" return ""
} }
return filterID return filterID
@@ -65,7 +65,7 @@ func (s *Store) LoadFilterID(userID id.UserID) string {
// SaveNextBatch to DB // SaveNextBatch to DB
func (s *Store) SaveNextBatch(userID id.UserID, nextBatchToken string) { func (s *Store) SaveNextBatch(userID id.UserID, nextBatchToken string) {
s.log.Debug("saving next batch token for %s", userID) s.log.Debug("saving next batch token for %q", userID)
tx, err := s.db.Begin() tx, err := s.db.Begin()
if err != nil { if err != nil {
s.log.Error("cannot begin transaction: %v", err) s.log.Error("cannot begin transaction: %v", err)
@@ -103,7 +103,7 @@ func (s *Store) SaveNextBatch(userID id.UserID, nextBatchToken string) {
// LoadNextBatch from DB // LoadNextBatch from DB
func (s *Store) LoadNextBatch(userID id.UserID) string { func (s *Store) LoadNextBatch(userID id.UserID) string {
s.log.Debug("loading next batch token for %s", userID) s.log.Debug("loading next batch token for %q", userID)
query := "SELECT next_batch_token FROM user_batch_tokens WHERE user_id = $1" query := "SELECT next_batch_token FROM user_batch_tokens WHERE user_id = $1"
row := s.db.QueryRow(query, userID) row := s.db.QueryRow(query, userID)
var batchToken string var batchToken string
@@ -116,11 +116,11 @@ func (s *Store) LoadNextBatch(userID id.UserID) string {
// SaveRoom to DB, not implemented // SaveRoom to DB, not implemented
func (s *Store) SaveRoom(room *mautrix.Room) { func (s *Store) SaveRoom(room *mautrix.Room) {
s.log.Debug("saving room %s (stub, not implemented)", room.ID) s.log.Debug("saving room %q (stub, not implemented)", room.ID)
} }
// LoadRoom from DB, not implemented // LoadRoom from DB, not implemented
func (s *Store) LoadRoom(roomID id.RoomID) *mautrix.Room { func (s *Store) LoadRoom(roomID id.RoomID) *mautrix.Room {
s.log.Debug("loading room %s (stub, not implemented)", roomID) s.log.Debug("loading room %q (stub, not implemented)", roomID)
return mautrix.NewRoom(roomID) return mautrix.NewRoom(roomID)
} }

View File

@@ -78,7 +78,7 @@ func (l *Linkpearl) tryJoin(roomID id.RoomID, retry int) {
_, err := l.api.JoinRoomByID(roomID) _, err := l.api.JoinRoomByID(roomID)
if err != nil { if err != nil {
l.log.Error("cannot join the room %s: %v", roomID, err) l.log.Error("cannot join the room %q: %v", roomID, err)
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
l.log.Debug("trying to join again (%d/%d)", retry+1, l.maxretries) l.log.Debug("trying to join again (%d/%d)", retry+1, l.maxretries)
l.tryJoin(roomID, retry+1) l.tryJoin(roomID, retry+1)

View File

@@ -10,6 +10,7 @@ import (
"fmt" "fmt"
"io" "io"
"math" "math"
"strings"
"sync" "sync"
_ "crypto/sha1" _ "crypto/sha1"
@@ -118,6 +119,20 @@ func algorithmsForKeyFormat(keyFormat string) []string {
} }
} }
// supportedPubKeyAuthAlgos specifies the supported client public key
// authentication algorithms. Note that this doesn't include certificate types
// since those use the underlying algorithm. This list is sent to the client if
// it supports the server-sig-algs extension. Order is irrelevant.
var supportedPubKeyAuthAlgos = []string{
KeyAlgoED25519,
KeyAlgoSKED25519, KeyAlgoSKECDSA256,
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA,
KeyAlgoDSA,
}
var supportedPubKeyAuthAlgosList = strings.Join(supportedPubKeyAuthAlgos, ",")
// unexpectedMessageError results when the SSH message that we received didn't // unexpectedMessageError results when the SSH message that we received didn't
// match what we wanted. // match what we wanted.
func unexpectedMessageError(expected, got uint8) error { func unexpectedMessageError(expected, got uint8) error {

View File

@@ -615,7 +615,8 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return err return err
} }
if t.sessionID == nil { firstKeyExchange := t.sessionID == nil
if firstKeyExchange {
t.sessionID = result.H t.sessionID = result.H
} }
result.SessionID = t.sessionID result.SessionID = t.sessionID
@@ -626,6 +627,24 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
return err return err
} }
// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1.
if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
extInfo := &extInfoMsg{
NumExtensions: 1,
Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)),
}
extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs"))
extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...)
extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList))
extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...)
if err := t.conn.writePacket(Marshal(extInfo)); err != nil {
return err
}
}
if packet, err := t.conn.readPacket(); err != nil { if packet, err := t.conn.readPacket(); err != nil {
return err return err
} else if packet[0] != msgNewKeys { } else if packet[0] != msgNewKeys {

View File

@@ -291,15 +291,6 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
return perms, err return perms, err
} }
func isAcceptableAlgo(algo string) bool {
switch algo {
case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
return true
}
return false
}
func checkSourceAddress(addr net.Addr, sourceAddrs string) error { func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
if addr == nil { if addr == nil {
return errors.New("ssh: no address known for client, but source-address match required") return errors.New("ssh: no address known for client, but source-address match required")
@@ -514,7 +505,7 @@ userAuthLoop:
return nil, parseError(msgUserAuthRequest) return nil, parseError(msgUserAuthRequest)
} }
algo := string(algoBytes) algo := string(algoBytes)
if !isAcceptableAlgo(algo) { if !contains(supportedPubKeyAuthAlgos, underlyingAlgo(algo)) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break break
} }
@@ -572,7 +563,7 @@ userAuthLoop:
// algorithm name that corresponds to algo with // algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but // sig.Format. This is usually the same, but
// for certs, the names differ. // for certs, the names differ.
if !isAcceptableAlgo(sig.Format) { if !contains(supportedPubKeyAuthAlgos, sig.Format) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break break
} }

View File

@@ -1,3 +1,30 @@
## v0.12.3 (2022-11-16)
* **Breaking change:** Added logging for row iteration in the dbutil package.
This changes the return type of `Query` methods from `*sql.Rows` to a new
`dbutil.Rows` interface.
* Added flag to disable wrapping database upgrades in a transaction (e.g. to
allow setting `PRAGMA`s for advanced table mutations on SQLite).
* Deprecated `MessageEventContent.GetReplyTo` in favor of directly using
`RelatesTo.GetReplyTo`. RelatesTo methods are nil-safe, so checking if
RelatesTo is nil is not necessary for using those methods.
* Added wrapper for space hierarchyendpoint (thanks to [@mgcm] in [#100]).
* Added bridge config option to handle transactions asynchronously.
* Added separate channels for to-device events in appservice transaction
handler to avoid blocking to-device events behind normal events.
* Added `RelatesTo.GetNonFallbackReplyTo` utility method to get the reply event
ID, unless the reply is a thread fallback.
* Added `event.TextToHTML` as an utility method to HTML-escape a string and
replace newlines with `<br/>`.
* Added check to bridge encryption helper to make sure the e2ee keys are still
on the server. Synapse is known to sometimes lose keys randomly.
* Changed bridge crypto syncer to crash on `M_UNKNOWN_TOKEN` errors instead of
retrying forever pointlessly.
* Fixed verifying signatures of fallback one-time keys.
[@mgcm]: https://github.com/mgcm
[#100]: https://github.com/mautrix/go/pull/100
## v0.12.2 (2022-10-16) ## v0.12.2 (2022-10-16)
* Added utility method to redact bridge commands. * Added utility method to redact bridge commands.

View File

@@ -97,6 +97,7 @@ type AppService struct {
txnIDC *TransactionIDCache txnIDC *TransactionIDCache
Events chan *event.Event `yaml:"-"` Events chan *event.Event `yaml:"-"`
ToDeviceEvents chan *event.Event `yaml:"-"`
DeviceLists chan *mautrix.DeviceLists `yaml:"-"` DeviceLists chan *mautrix.DeviceLists `yaml:"-"`
OTKCounts chan *mautrix.OTKCount `yaml:"-"` OTKCounts chan *mautrix.OTKCount `yaml:"-"`
QueryHandler QueryHandler `yaml:"-"` QueryHandler QueryHandler `yaml:"-"`
@@ -275,6 +276,7 @@ func (as *AppService) BotClient() *mautrix.Client {
// Init initializes the logger and loads the registration of this appservice. // Init initializes the logger and loads the registration of this appservice.
func (as *AppService) Init() (bool, error) { func (as *AppService) Init() (bool, error) {
as.Events = make(chan *event.Event, EventChannelSize) as.Events = make(chan *event.Event, EventChannelSize)
as.ToDeviceEvents = make(chan *event.Event, EventChannelSize)
as.OTKCounts = make(chan *mautrix.OTKCount, OTKChannelSize) as.OTKCounts = make(chan *mautrix.OTKCount, OTKChannelSize)
as.DeviceLists = make(chan *mautrix.DeviceLists, EventChannelSize) as.DeviceLists = make(chan *mautrix.DeviceLists, EventChannelSize)
as.QueryHandler = &QueryHandlerStub{} as.QueryHandler = &QueryHandlerStub{}

View File

@@ -137,12 +137,22 @@ func (ep *EventProcessor) Dispatch(evt *event.Event) {
} }
} }
} }
func (ep *EventProcessor) startEvents() {
func (ep *EventProcessor) Start() {
for { for {
select { select {
case evt := <-ep.as.Events: case evt := <-ep.as.Events:
ep.Dispatch(evt) ep.Dispatch(evt)
case <-ep.stop:
return
}
}
}
func (ep *EventProcessor) startEncryption() {
for {
select {
case evt := <-ep.as.ToDeviceEvents:
ep.Dispatch(evt)
case otk := <-ep.as.OTKCounts: case otk := <-ep.as.OTKCounts:
ep.DispatchOTK(otk) ep.DispatchOTK(otk)
case dl := <-ep.as.DeviceLists: case dl := <-ep.as.DeviceLists:
@@ -153,6 +163,11 @@ func (ep *EventProcessor) Start() {
} }
} }
func (ep *EventProcessor) Stop() { func (ep *EventProcessor) Start() {
ep.stop <- struct{}{} go ep.startEvents()
go ep.startEncryption()
}
func (ep *EventProcessor) Stop() {
close(ep.stop)
} }

View File

@@ -206,14 +206,20 @@ func (as *AppService) handleEvents(evts []*event.Event, defaultTypeClass event.T
} }
if evt.Type.IsState() { if evt.Type.IsState() {
// TODO remove this check after https://github.com/matrix-org/synapse/pull/11265 // TODO remove this check after making sure the log doesn't happen
historical, ok := evt.Content.Raw["org.matrix.msc2716.historical"].(bool) historical, ok := evt.Content.Raw["org.matrix.msc2716.historical"].(bool)
if !ok || !historical { if ok && historical {
as.Log.Warnfln("Received historical state event %s (%s/%s)", evt.ID, evt.Type.Type, evt.GetStateKey())
} else {
as.UpdateState(evt) as.UpdateState(evt)
} }
} }
if evt.Type.Class == event.ToDeviceEventType {
as.ToDeviceEvents <- evt
} else {
as.Events <- evt as.Events <- evt
} }
}
} }
// GetRoom handles a /rooms GET call from the homeserver. // GetRoom handles a /rooms GET call from the homeserver.

View File

@@ -59,15 +59,16 @@ type AppserviceConfig struct {
HSToken string `yaml:"hs_token"` HSToken string `yaml:"hs_token"`
EphemeralEvents bool `yaml:"ephemeral_events"` EphemeralEvents bool `yaml:"ephemeral_events"`
AsyncTransactions bool `yaml:"async_transactions"`
} }
func (config *BaseConfig) MakeUserIDRegex() *regexp.Regexp { func (config *BaseConfig) MakeUserIDRegex(matcher string) *regexp.Regexp {
usernamePlaceholder := util.RandomString(16) usernamePlaceholder := strings.ToLower(util.RandomString(16))
usernameTemplate := fmt.Sprintf("@%s:%s", usernameTemplate := fmt.Sprintf("@%s:%s",
config.Bridge.FormatUsername(usernamePlaceholder), config.Bridge.FormatUsername(usernamePlaceholder),
config.Homeserver.Domain) config.Homeserver.Domain)
usernameTemplate = regexp.QuoteMeta(usernameTemplate) usernameTemplate = regexp.QuoteMeta(usernameTemplate)
usernameTemplate = strings.Replace(usernameTemplate, usernamePlaceholder, ".+", 1) usernameTemplate = strings.Replace(usernameTemplate, usernamePlaceholder, matcher, 1)
usernameTemplate = fmt.Sprintf("^%s$", usernameTemplate) usernameTemplate = fmt.Sprintf("^%s$", usernameTemplate)
return regexp.MustCompile(usernameTemplate) return regexp.MustCompile(usernameTemplate)
} }
@@ -84,7 +85,7 @@ func (config *BaseConfig) GenerateRegistration() *appservice.Registration {
regexp.QuoteMeta(config.AppService.Bot.Username), regexp.QuoteMeta(config.AppService.Bot.Username),
regexp.QuoteMeta(config.Homeserver.Domain))) regexp.QuoteMeta(config.Homeserver.Domain)))
registration.Namespaces.UserIDs.Register(botRegex, true) registration.Namespaces.UserIDs.Register(botRegex, true)
registration.Namespaces.UserIDs.Register(config.MakeUserIDRegex(), true) registration.Namespaces.UserIDs.Register(config.MakeUserIDRegex(".*"), true)
return registration return registration
} }
@@ -230,6 +231,7 @@ func doUpgrade(helper *up.Helper) {
helper.Copy(up.Str, "appservice", "bot", "displayname") helper.Copy(up.Str, "appservice", "bot", "displayname")
helper.Copy(up.Str, "appservice", "bot", "avatar") helper.Copy(up.Str, "appservice", "bot", "avatar")
helper.Copy(up.Bool, "appservice", "ephemeral_events") helper.Copy(up.Bool, "appservice", "ephemeral_events")
helper.Copy(up.Bool, "appservice", "async_transactions")
helper.Copy(up.Str, "appservice", "as_token") helper.Copy(up.Str, "appservice", "as_token")
helper.Copy(up.Str, "appservice", "hs_token") helper.Copy(up.Str, "appservice", "hs_token")

View File

@@ -1468,6 +1468,18 @@ func (cli *Client) JoinedRooms() (resp *RespJoinedRooms, err error) {
return return
} }
// Hierarchy returns a list of rooms that are in the room's hierarchy. See https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv1roomsroomidhierarchy
//
// The hierarchy API is provided to walk the space tree and discover the rooms with their aesthetic details. works in a depth-first manner:
// when it encounters another space as a child it recurses into that space before returning non-space children.
//
// The second function parameter specifies query parameters to limit the response. No query parameters will be added if it's nil.
func (cli *Client) Hierarchy(roomID id.RoomID, req *ReqHierarchy) (resp *RespHierarchy, err error) {
urlPath := cli.BuildURLWithQuery(ClientURLPath{"v1", "rooms", roomID, "hierarchy"}, req.Query())
_, err = cli.MakeRequest(http.MethodGet, urlPath, nil, &resp)
return
}
// Messages returns a list of message and state events for a room. It uses // Messages returns a list of message and state events for a room. It uses
// pagination query parameters to paginate history in the room. // pagination query parameters to paginate history in the room.
// See https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3roomsroomidmessages // See https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3roomsroomidmessages
@@ -1760,6 +1772,9 @@ func (cli *Client) BatchSend(roomID id.RoomID, req *ReqBatchSend) (resp *RespBat
if req.BeeperNewMessages { if req.BeeperNewMessages {
query["com.beeper.new_messages"] = "true" query["com.beeper.new_messages"] = "true"
} }
if req.BeeperMarkReadBy != "" {
query["com.beeper.mark_read_by"] = req.BeeperMarkReadBy.String()
}
if len(req.BatchID) > 0 { if len(req.BatchID) > 0 {
query["batch_id"] = req.BatchID.String() query["batch_id"] = req.BatchID.String()
} }

View File

@@ -97,7 +97,7 @@ func (mach *OlmMachine) createOutboundSessions(input map[id.UserID]map[id.Device
continue continue
} }
identity := input[userID][deviceID] identity := input[userID][deviceID]
if ok, err := olm.VerifySignatureJSON(oneTimeKey, userID, deviceID.String(), identity.SigningKey); err != nil { if ok, err := olm.VerifySignatureJSON(oneTimeKey.RawData, userID, deviceID.String(), identity.SigningKey); err != nil {
mach.Log.Error("Failed to verify signature for %s of %s: %v", deviceID, userID, err) mach.Log.Error("Failed to verify signature for %s of %s: %v", deviceID, userID, err)
} else if !ok { } else if !ok {
mach.Log.Warn("Invalid signature for %s of %s", deviceID, userID) mach.Log.Warn("Invalid signature for %s of %s", deviceID, userID)

View File

@@ -445,15 +445,20 @@ func (mach *OlmMachine) WaitForSession(roomID id.RoomID, senderKey id.SenderKey,
mach.keyWaitersLock.Lock() mach.keyWaitersLock.Lock()
ch, ok := mach.keyWaiters[sessionID] ch, ok := mach.keyWaiters[sessionID]
if !ok { if !ok {
ch := make(chan struct{}) ch = make(chan struct{})
mach.keyWaiters[sessionID] = ch mach.keyWaiters[sessionID] = ch
} }
mach.keyWaitersLock.Unlock() mach.keyWaitersLock.Unlock()
// Handle race conditions where a session appears between the failed decryption and WaitForSession call.
sess, err := mach.CryptoStore.GetGroupSession(roomID, senderKey, sessionID)
if sess != nil || errors.Is(err, ErrGroupSessionWithheld) {
return true
}
select { select {
case <-ch: case <-ch:
return true return true
case <-time.After(timeout): case <-time.After(timeout):
sess, err := mach.CryptoStore.GetGroupSession(roomID, senderKey, sessionID) sess, err = mach.CryptoStore.GetGroupSession(roomID, senderKey, sessionID)
// Check if the session somehow appeared in the store without telling us // Check if the session somehow appeared in the store without telling us
// We accept withheld sessions as received, as then the decryption attempt will show the error. // We accept withheld sessions as received, as then the decryption attempt will show the error.
return sess != nil || errors.Is(err, ErrGroupSessionWithheld) return sess != nil || errors.Is(err, ErrGroupSessionWithheld)

View File

@@ -107,10 +107,14 @@ func (u *Utility) VerifySignature(message string, key id.Ed25519, signature stri
// https://matrix.org/speculator/spec/drafts%2Fe2e/appendices.html#signing-json // https://matrix.org/speculator/spec/drafts%2Fe2e/appendices.html#signing-json
// If the _obj is a struct, the `json` tags will be honored. // If the _obj is a struct, the `json` tags will be honored.
func (u *Utility) VerifySignatureJSON(obj interface{}, userID id.UserID, keyName string, key id.Ed25519) (bool, error) { func (u *Utility) VerifySignatureJSON(obj interface{}, userID id.UserID, keyName string, key id.Ed25519) (bool, error) {
objJSON, err := json.Marshal(obj) var err error
objJSON, ok := obj.(json.RawMessage)
if !ok {
objJSON, err = json.Marshal(obj)
if err != nil { if err != nil {
return false, err return false, err
} }
}
sig := gjson.GetBytes(objJSON, util.GJSONPath("signatures", string(userID), fmt.Sprintf("ed25519:%s", keyName))) sig := gjson.GetBytes(objJSON, util.GJSONPath("signatures", string(userID), fmt.Sprintf("ed25519:%s", keyName)))
if !sig.Exists() || sig.Type != gjson.String { if !sig.Exists() || sig.Type != gjson.String {
return false, SignatureNotFound return false, SignatureNotFound

View File

@@ -302,7 +302,7 @@ func (store *SQLCryptoStore) GetWithheldGroupSession(roomID id.RoomID, senderKey
}, nil }, nil
} }
func (store *SQLCryptoStore) scanGroupSessionList(rows *sql.Rows) (result []*InboundGroupSession, err error) { func (store *SQLCryptoStore) scanGroupSessionList(rows dbutil.Rows) (result []*InboundGroupSession, err error) {
for rows.Next() { for rows.Next() {
var roomID id.RoomID var roomID id.RoomID
var signingKey, senderKey, forwardingChains sql.NullString var signingKey, senderKey, forwardingChains sql.NullString
@@ -577,7 +577,7 @@ func (store *SQLCryptoStore) PutDevices(userID id.UserID, devices map[id.DeviceI
// FilterTrackedUsers finds all the user IDs out of the given ones for which the database contains identity information. // FilterTrackedUsers finds all the user IDs out of the given ones for which the database contains identity information.
func (store *SQLCryptoStore) FilterTrackedUsers(users []id.UserID) ([]id.UserID, error) { func (store *SQLCryptoStore) FilterTrackedUsers(users []id.UserID) ([]id.UserID, error) {
var rows *sql.Rows var rows dbutil.Rows
var err error var err error
if store.DB.Dialect == dbutil.Postgres && PostgresArrayWrapper != nil { if store.DB.Dialect == dbutil.Postgres && PostgresArrayWrapper != nil {
rows, err = store.DB.Query("SELECT user_id FROM crypto_tracked_user WHERE user_id = ANY($1)", PostgresArrayWrapper(users)) rows, err = store.DB.Query("SELECT user_id FROM crypto_tracked_user WHERE user_id = ANY($1)", PostgresArrayWrapper(users))

View File

@@ -22,7 +22,7 @@ const VersionTableName = "crypto_version"
var fs embed.FS var fs embed.FS
func init() { func init() {
Table.Register(-1, 3, "Unsupported version", func(tx dbutil.Transaction, database *dbutil.Database) error { Table.Register(-1, 3, "Unsupported version", false, func(tx dbutil.Execable, database *dbutil.Database) error {
return fmt.Errorf("upgrading from versions 1 and 2 of the crypto store is no longer supported in mautrix-go v0.12+") return fmt.Errorf("upgrading from versions 1 and 2 of the crypto store is no longer supported in mautrix-go v0.12+")
}) })
Table.RegisterFS(fs) Table.RegisterFS(fs)

View File

@@ -58,6 +58,8 @@ var (
// The client attempted to join a room that has a version the server does not support. // The client attempted to join a room that has a version the server does not support.
// Inspect the room_version property of the error response for the room's version. // Inspect the room_version property of the error response for the room's version.
MIncompatibleRoomVersion = RespError{ErrCode: "M_INCOMPATIBLE_ROOM_VERSION"} MIncompatibleRoomVersion = RespError{ErrCode: "M_INCOMPATIBLE_ROOM_VERSION"}
// The client specified a parameter that has the wrong value.
MInvalidParam = RespError{ErrCode: "M_INVALID_PARAM"}
) )
// HTTPError An HTTP Error response, which may wrap an underlying native Go Error. // HTTPError An HTTP Error response, which may wrap an underlying native Go Error.

View File

@@ -127,6 +127,7 @@ type StrippedState struct {
Content Content `json:"content"` Content Content `json:"content"`
Type Type `json:"type"` Type Type `json:"type"`
StateKey string `json:"state_key"` StateKey string `json:"state_key"`
Sender id.UserID `json:"sender"`
} }
type Unsigned struct { type Unsigned struct {

View File

@@ -138,9 +138,13 @@ func (content *MessageEventContent) SetEdit(original id.EventID) {
} }
} }
func TextToHTML(text string) string {
return strings.ReplaceAll(html.EscapeString(text), "\n", "<br/>")
}
func (content *MessageEventContent) EnsureHasHTML() { func (content *MessageEventContent) EnsureHasHTML() {
if len(content.FormattedBody) == 0 || content.Format != FormatHTML { if len(content.FormattedBody) == 0 || content.Format != FormatHTML {
content.FormattedBody = strings.ReplaceAll(html.EscapeString(content.Body), "\n", "<br/>") content.FormattedBody = TextToHTML(content.Body)
content.Format = FormatHTML content.Format = FormatHTML
} }
} }

View File

@@ -70,6 +70,13 @@ func (rel *RelatesTo) GetReplyTo() id.EventID {
return "" return ""
} }
func (rel *RelatesTo) GetNonFallbackReplyTo() id.EventID {
if rel != nil && rel.InReplyTo != nil && !rel.IsFallingBack {
return rel.InReplyTo.EventID
}
return ""
}
func (rel *RelatesTo) GetAnnotationID() id.EventID { func (rel *RelatesTo) GetAnnotationID() id.EventID {
if rel != nil && rel.Type == RelAnnotation { if rel != nil && rel.Type == RelAnnotation {
return rel.EventID return rel.EventID

View File

@@ -35,7 +35,7 @@ func TrimReplyFallbackText(text string) string {
} }
func (content *MessageEventContent) RemoveReplyFallback() { func (content *MessageEventContent) RemoveReplyFallback() {
if len(content.GetReplyTo()) > 0 && !content.replyFallbackRemoved { if len(content.RelatesTo.GetReplyTo()) > 0 && !content.replyFallbackRemoved {
if content.Format == FormatHTML { if content.Format == FormatHTML {
content.FormattedBody = TrimReplyFallbackHTML(content.FormattedBody) content.FormattedBody = TrimReplyFallbackHTML(content.FormattedBody)
} }
@@ -44,11 +44,9 @@ func (content *MessageEventContent) RemoveReplyFallback() {
} }
} }
// Deprecated: RelatesTo methods are nil-safe, so RelatesTo.GetReplyTo can be used directly
func (content *MessageEventContent) GetReplyTo() id.EventID { func (content *MessageEventContent) GetReplyTo() id.EventID {
if content.RelatesTo != nil {
return content.RelatesTo.GetReplyTo() return content.RelatesTo.GetReplyTo()
}
return ""
} }
const ReplyFormat = `<mx-reply><blockquote><a href="https://matrix.to/#/%s/%s">In reply to</a> <a href="https://matrix.to/#/%s">%s</a><br>%s</blockquote></mx-reply>` const ReplyFormat = `<mx-reply><blockquote><a href="https://matrix.to/#/%s/%s">In reply to</a> <a href="https://matrix.to/#/%s">%s</a><br>%s</blockquote></mx-reply>`

View File

@@ -2,6 +2,7 @@ package mautrix
import ( import (
"encoding/json" "encoding/json"
"strconv"
"maunium.net/go/mautrix/event" "maunium.net/go/mautrix/event"
"maunium.net/go/mautrix/id" "maunium.net/go/mautrix/id"
@@ -173,9 +174,14 @@ type ReqAliasCreate struct {
type OneTimeKey struct { type OneTimeKey struct {
Key id.Curve25519 `json:"key"` Key id.Curve25519 `json:"key"`
IsSigned bool `json:"-"` Fallback bool `json:"fallback,omitempty"`
Signatures Signatures `json:"signatures,omitempty"` Signatures Signatures `json:"signatures,omitempty"`
Unsigned map[string]interface{} `json:"unsigned,omitempty"` Unsigned map[string]any `json:"unsigned,omitempty"`
IsSigned bool `json:"-"`
// Raw data in the one-time key. This must be used for signature verification to ensure unrecognized fields
// aren't thrown away (because that would invalidate the signature).
RawData json.RawMessage `json:"-"`
} }
type serializableOTK OneTimeKey type serializableOTK OneTimeKey
@@ -188,6 +194,7 @@ func (otk *OneTimeKey) UnmarshalJSON(data []byte) (err error) {
otk.IsSigned = false otk.IsSigned = false
} else { } else {
err = json.Unmarshal(data, (*serializableOTK)(otk)) err = json.Unmarshal(data, (*serializableOTK)(otk))
otk.RawData = data
otk.IsSigned = true otk.IsSigned = true
} }
return err return err
@@ -320,6 +327,7 @@ type ReqBatchSend struct {
BatchID id.BatchID `json:"-"` BatchID id.BatchID `json:"-"`
BeeperNewMessages bool `json:"-"` BeeperNewMessages bool `json:"-"`
BeeperMarkReadBy id.UserID `json:"-"`
StateEventsAtStart []*event.Event `json:"state_events_at_start"` StateEventsAtStart []*event.Event `json:"state_events_at_start"`
Events []*event.Event `json:"events"` Events []*event.Event `json:"events"`
@@ -334,3 +342,41 @@ type ReqSetReadMarkers struct {
BeeperReadPrivateExtra interface{} `json:"com.beeper.read.private.extra"` BeeperReadPrivateExtra interface{} `json:"com.beeper.read.private.extra"`
BeeperFullyReadExtra interface{} `json:"com.beeper.fully_read.extra"` BeeperFullyReadExtra interface{} `json:"com.beeper.fully_read.extra"`
} }
// ReqHierarchy contains the parameters for https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv1roomsroomidhierarchy
//
// As it's a GET method, there is no JSON body, so this is only query parameters.
type ReqHierarchy struct {
// A pagination token from a previous Hierarchy call.
// If specified, max_depth and suggested_only cannot be changed from the first request.
From string
// Limit for the maximum number of rooms to include per response.
// The server will apply a default value if a limit isn't provided.
Limit int
// Limit for how far to go into the space. When reached, no further child rooms will be returned.
// The server will apply a default value if a max depth isn't provided.
MaxDepth *int
// Flag to indicate whether the server should only consider suggested rooms.
// Suggested rooms are annotated in their m.space.child event contents.
SuggestedOnly bool
}
func (req *ReqHierarchy) Query() map[string]string {
query := map[string]string{}
if req == nil {
return query
}
if req.From != "" {
query["from"] = req.From
}
if req.Limit > 0 {
query["limit"] = strconv.Itoa(req.Limit)
}
if req.MaxDepth != nil {
query["max_depth"] = strconv.Itoa(*req.MaxDepth)
}
if req.SuggestedOnly {
query["suggested_only"] = "true"
}
return query
}

View File

@@ -12,6 +12,7 @@ import (
"maunium.net/go/mautrix/event" "maunium.net/go/mautrix/event"
"maunium.net/go/mautrix/id" "maunium.net/go/mautrix/id"
"maunium.net/go/mautrix/util" "maunium.net/go/mautrix/util"
"maunium.net/go/mautrix/util/jsontime"
) )
// RespWhoami is the JSON response for https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3accountwhoami // RespWhoami is the JSON response for https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3accountwhoami
@@ -514,3 +515,28 @@ func (vers *CapRoomVersions) IsAvailable(version string) bool {
_, available := vers.Available[version] _, available := vers.Available[version]
return available return available
} }
// RespHierarchy is the JSON response for https://spec.matrix.org/v1.4/client-server-api/#get_matrixclientv1roomsroomidhierarchy
type RespHierarchy struct {
NextBatch string `json:"next_batch,omitempty"`
Rooms []ChildRoomsChunk `json:"rooms"`
}
type ChildRoomsChunk struct {
AvatarURL id.ContentURI `json:"avatar_url,omitempty"`
CanonicalAlias id.RoomAlias `json:"canonical_alias,omitempty"`
ChildrenState []StrippedStateWithTime `json:"children_state"`
GuestCanJoin bool `json:"guest_can_join"`
JoinRule event.JoinRule `json:"join_rule,omitempty"`
Name string `json:"name,omitempty"`
NumJoinedMembers int `json:"num_joined_members"`
RoomID id.RoomID `json:"room_id"`
RoomType event.RoomType `json:"room_type"`
Topic string `json:"topic,omitempty"`
WorldReadble bool `json:"world_readable"`
}
type StrippedStateWithTime struct {
event.StrippedState
Timestamp jsontime.UnixMilli `json:"origin_server_ts"`
}

View File

@@ -15,7 +15,7 @@ import (
// LoggingExecable is a wrapper for anything with database Exec methods (i.e. sql.Conn, sql.DB and sql.Tx) // LoggingExecable is a wrapper for anything with database Exec methods (i.e. sql.Conn, sql.DB and sql.Tx)
// that can preprocess queries (e.g. replacing $ with ? on SQLite) and log query durations. // that can preprocess queries (e.g. replacing $ with ? on SQLite) and log query durations.
type LoggingExecable struct { type LoggingExecable struct {
UnderlyingExecable Execable UnderlyingExecable UnderlyingExecable
db *Database db *Database
} }
@@ -23,23 +23,30 @@ func (le *LoggingExecable) ExecContext(ctx context.Context, query string, args .
start := time.Now() start := time.Now()
query = le.db.mutateQuery(query) query = le.db.mutateQuery(query)
res, err := le.UnderlyingExecable.ExecContext(ctx, query, args...) res, err := le.UnderlyingExecable.ExecContext(ctx, query, args...)
le.db.Log.QueryTiming(ctx, "Exec", query, args, time.Since(start)) le.db.Log.QueryTiming(ctx, "Exec", query, args, -1, time.Since(start))
return res, err return res, err
} }
func (le *LoggingExecable) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { func (le *LoggingExecable) QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error) {
start := time.Now() start := time.Now()
query = le.db.mutateQuery(query) query = le.db.mutateQuery(query)
rows, err := le.UnderlyingExecable.QueryContext(ctx, query, args...) rows, err := le.UnderlyingExecable.QueryContext(ctx, query, args...)
le.db.Log.QueryTiming(ctx, "Query", query, args, time.Since(start)) le.db.Log.QueryTiming(ctx, "Query", query, args, -1, time.Since(start))
return rows, err return &LoggingRows{
ctx: ctx,
db: le.db,
query: query,
args: args,
rs: rows,
start: start,
}, err
} }
func (le *LoggingExecable) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row { func (le *LoggingExecable) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
start := time.Now() start := time.Now()
query = le.db.mutateQuery(query) query = le.db.mutateQuery(query)
row := le.UnderlyingExecable.QueryRowContext(ctx, query, args...) row := le.UnderlyingExecable.QueryRowContext(ctx, query, args...)
le.db.Log.QueryTiming(ctx, "QueryRow", query, args, time.Since(start)) le.db.Log.QueryTiming(ctx, "QueryRow", query, args, -1, time.Since(start))
return row return row
} }
@@ -47,7 +54,7 @@ func (le *LoggingExecable) Exec(query string, args ...interface{}) (sql.Result,
return le.ExecContext(context.Background(), query, args...) return le.ExecContext(context.Background(), query, args...)
} }
func (le *LoggingExecable) Query(query string, args ...interface{}) (*sql.Rows, error) { func (le *LoggingExecable) Query(query string, args ...interface{}) (Rows, error) {
return le.QueryContext(context.Background(), query, args...) return le.QueryContext(context.Background(), query, args...)
} }
@@ -66,7 +73,7 @@ type loggingDB struct {
func (ld *loggingDB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*LoggingTxn, error) { func (ld *loggingDB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*LoggingTxn, error) {
start := time.Now() start := time.Now()
tx, err := ld.db.RawDB.BeginTx(ctx, opts) tx, err := ld.db.RawDB.BeginTx(ctx, opts)
ld.db.Log.QueryTiming(ctx, "Begin", "", nil, time.Since(start)) ld.db.Log.QueryTiming(ctx, "Begin", "", nil, -1, time.Since(start))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -90,13 +97,76 @@ type LoggingTxn struct {
func (lt *LoggingTxn) Commit() error { func (lt *LoggingTxn) Commit() error {
start := time.Now() start := time.Now()
err := lt.UnderlyingTx.Commit() err := lt.UnderlyingTx.Commit()
lt.db.Log.QueryTiming(lt.ctx, "Commit", "", nil, time.Since(start)) lt.db.Log.QueryTiming(lt.ctx, "Commit", "", nil, -1, time.Since(start))
return err return err
} }
func (lt *LoggingTxn) Rollback() error { func (lt *LoggingTxn) Rollback() error {
start := time.Now() start := time.Now()
err := lt.UnderlyingTx.Rollback() err := lt.UnderlyingTx.Rollback()
lt.db.Log.QueryTiming(lt.ctx, "Rollback", "", nil, time.Since(start)) lt.db.Log.QueryTiming(lt.ctx, "Rollback", "", nil, -1, time.Since(start))
return err return err
} }
type LoggingRows struct {
ctx context.Context
db *Database
query string
args []interface{}
rs Rows
start time.Time
nrows int
}
func (lrs *LoggingRows) stopTiming() {
if !lrs.start.IsZero() {
lrs.db.Log.QueryTiming(lrs.ctx, "EndRows", lrs.query, lrs.args, lrs.nrows, time.Since(lrs.start))
lrs.start = time.Time{}
}
}
func (lrs *LoggingRows) Close() error {
err := lrs.rs.Close()
lrs.stopTiming()
return err
}
func (lrs *LoggingRows) ColumnTypes() ([]*sql.ColumnType, error) {
return lrs.rs.ColumnTypes()
}
func (lrs *LoggingRows) Columns() ([]string, error) {
return lrs.rs.Columns()
}
func (lrs *LoggingRows) Err() error {
return lrs.rs.Err()
}
func (lrs *LoggingRows) Next() bool {
hasNext := lrs.rs.Next()
if !hasNext {
lrs.stopTiming()
} else {
lrs.nrows++
}
return hasNext
}
func (lrs *LoggingRows) NextResultSet() bool {
hasNext := lrs.rs.NextResultSet()
if !hasNext {
lrs.stopTiming()
} else {
lrs.nrows++
}
return hasNext
}
func (lrs *LoggingRows) Scan(dest ...any) error {
return lrs.rs.Scan(dest...)
}

View File

@@ -40,13 +40,23 @@ func ParseDialect(engine string) (Dialect, error) {
switch strings.ToLower(engine) { switch strings.ToLower(engine) {
case "postgres", "postgresql": case "postgres", "postgresql":
return Postgres, nil return Postgres, nil
case "sqlite3", "sqlite", "litestream": case "sqlite3", "sqlite", "litestream", "sqlite3-fk-wal":
return SQLite, nil return SQLite, nil
default: default:
return DialectUnknown, fmt.Errorf("unknown dialect '%s'", engine) return DialectUnknown, fmt.Errorf("unknown dialect '%s'", engine)
} }
} }
type Rows interface {
Close() error
ColumnTypes() ([]*sql.ColumnType, error)
Columns() ([]string, error)
Err() error
Next() bool
NextResultSet() bool
Scan(...any) error
}
type Scannable interface { type Scannable interface {
Scan(...interface{}) error Scan(...interface{}) error
} }
@@ -54,19 +64,32 @@ type Scannable interface {
// Expected implementations of Scannable // Expected implementations of Scannable
var ( var (
_ Scannable = (*sql.Row)(nil) _ Scannable = (*sql.Row)(nil)
_ Scannable = (*sql.Rows)(nil) _ Scannable = (Rows)(nil)
) )
type ContextExecable interface { type UnderlyingContextExecable interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
} }
type ContextExecable interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (Rows, error)
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
}
type UnderlyingExecable interface {
UnderlyingContextExecable
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
}
type Execable interface { type Execable interface {
ContextExecable ContextExecable
Exec(query string, args ...interface{}) (sql.Result, error) Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error) Query(query string, args ...interface{}) (Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row QueryRow(query string, args ...interface{}) *sql.Row
} }
@@ -78,11 +101,11 @@ type Transaction interface {
// Expected implementations of Execable // Expected implementations of Execable
var ( var (
_ Execable = (*sql.Tx)(nil) _ UnderlyingExecable = (*sql.Tx)(nil)
_ Execable = (*sql.DB)(nil) _ UnderlyingExecable = (*sql.DB)(nil)
_ Execable = (*LoggingExecable)(nil) _ Execable = (*LoggingExecable)(nil)
_ Transaction = (*LoggingTxn)(nil) _ Transaction = (*LoggingTxn)(nil)
_ ContextExecable = (*sql.Conn)(nil) _ UnderlyingContextExecable = (*sql.Conn)(nil)
) )
type Database struct { type Database struct {

View File

@@ -11,10 +11,10 @@ import (
) )
type DatabaseLogger interface { type DatabaseLogger interface {
QueryTiming(ctx context.Context, method, query string, args []interface{}, duration time.Duration) QueryTiming(ctx context.Context, method, query string, args []interface{}, nrows int, duration time.Duration)
WarnUnsupportedVersion(current, latest int) WarnUnsupportedVersion(current, latest int)
PrepareUpgrade(current, latest int) PrepareUpgrade(current, latest int)
DoUpgrade(from, to int, message string) DoUpgrade(from, to int, message string, txn bool)
// Deprecated: legacy warning method, return errors instead // Deprecated: legacy warning method, return errors instead
Warn(msg string, args ...interface{}) Warn(msg string, args ...interface{})
} }
@@ -25,10 +25,11 @@ var NoopLogger DatabaseLogger = &noopLogger{}
func (n noopLogger) WarnUnsupportedVersion(_, _ int) {} func (n noopLogger) WarnUnsupportedVersion(_, _ int) {}
func (n noopLogger) PrepareUpgrade(_, _ int) {} func (n noopLogger) PrepareUpgrade(_, _ int) {}
func (n noopLogger) DoUpgrade(_, _ int, _ string) {} func (n noopLogger) DoUpgrade(_, _ int, _ string, _ bool) {}
func (n noopLogger) Warn(msg string, args ...interface{}) {} func (n noopLogger) Warn(msg string, args ...interface{}) {}
func (n noopLogger) QueryTiming(_ context.Context, _, _ string, _ []interface{}, _ time.Duration) {} func (n noopLogger) QueryTiming(_ context.Context, _, _ string, _ []interface{}, _ int, _ time.Duration) {
}
type mauLogger struct { type mauLogger struct {
l maulogger.Logger l maulogger.Logger
@@ -46,11 +47,11 @@ func (m mauLogger) PrepareUpgrade(current, latest int) {
m.l.Infofln("Database currently on v%d, latest: v%d", current, latest) m.l.Infofln("Database currently on v%d, latest: v%d", current, latest)
} }
func (m mauLogger) DoUpgrade(from, to int, message string) { func (m mauLogger) DoUpgrade(from, to int, message string, _ bool) {
m.l.Infofln("Upgrading database from v%d to v%d: %s", from, to, message) m.l.Infofln("Upgrading database from v%d to v%d: %s", from, to, message)
} }
func (m mauLogger) QueryTiming(_ context.Context, method, query string, _ []interface{}, duration time.Duration) { func (m mauLogger) QueryTiming(_ context.Context, method, query string, _ []interface{}, _ int, duration time.Duration) {
if duration > 1*time.Second { if duration > 1*time.Second {
m.l.Warnfln("%s(%s) took %.3f seconds", method, query, duration.Seconds()) m.l.Warnfln("%s(%s) took %.3f seconds", method, query, duration.Seconds())
} }
@@ -90,17 +91,18 @@ func (z zeroLogger) PrepareUpgrade(current, latest int) {
} }
} }
func (z zeroLogger) DoUpgrade(from, to int, message string) { func (z zeroLogger) DoUpgrade(from, to int, message string, txn bool) {
z.l.Info(). z.l.Info().
Int("from", from). Int("from", from).
Int("to", to). Int("to", to).
Bool("single_txn", txn).
Str("description", message). Str("description", message).
Msg("Upgrading database") Msg("Upgrading database")
} }
var whitespaceRegex = regexp.MustCompile(`\s+`) var whitespaceRegex = regexp.MustCompile(`\s+`)
func (z zeroLogger) QueryTiming(ctx context.Context, method, query string, args []interface{}, duration time.Duration) { func (z zeroLogger) QueryTiming(ctx context.Context, method, query string, args []interface{}, nrows int, duration time.Duration) {
log := zerolog.Ctx(ctx) log := zerolog.Ctx(ctx)
if log.GetLevel() == zerolog.Disabled { if log.GetLevel() == zerolog.Disabled {
log = z.l log = z.l
@@ -108,6 +110,10 @@ func (z zeroLogger) QueryTiming(ctx context.Context, method, query string, args
if log.GetLevel() != zerolog.TraceLevel && duration < 1*time.Second { if log.GetLevel() != zerolog.TraceLevel && duration < 1*time.Second {
return return
} }
if nrows > -1 {
rowLog := log.With().Int("rows", nrows).Logger()
log = &rowLog
}
query = strings.TrimSpace(whitespaceRegex.ReplaceAllLiteralString(query, " ")) query = strings.TrimSpace(whitespaceRegex.ReplaceAllLiteralString(query, " "))
log.Trace(). log.Trace().
Int64("duration_µs", duration.Microseconds()). Int64("duration_µs", duration.Microseconds()).

View File

@@ -0,0 +1,4 @@
-- v4: Sample outside transaction
-- transaction: off
INSERT INTO foo VALUES ('meow', '{}');

View File

@@ -0,0 +1 @@
INSERT INTO foo VALUES ('meow', '{}');

View File

@@ -0,0 +1 @@
INSERT INTO foo VALUES ('meow', '{}');

View File

@@ -12,13 +12,14 @@ import (
"fmt" "fmt"
) )
type upgradeFunc func(Transaction, *Database) error type upgradeFunc func(Execable, *Database) error
type upgrade struct { type upgrade struct {
message string message string
fn upgradeFunc fn upgradeFunc
upgradesTo int upgradesTo int
transaction bool
} }
var ErrUnsupportedDatabaseVersion = fmt.Errorf("unsupported database schema version") var ErrUnsupportedDatabaseVersion = fmt.Errorf("unsupported database schema version")
@@ -93,7 +94,7 @@ func (db *Database) checkDatabaseOwner() error {
return nil return nil
} }
func (db *Database) setVersion(tx Transaction, version int) error { func (db *Database) setVersion(tx Execable, version int) error {
_, err := tx.Exec(fmt.Sprintf("DELETE FROM %s", db.VersionTable)) _, err := tx.Exec(fmt.Sprintf("DELETE FROM %s", db.VersionTable))
if err != nil { if err != nil {
return err return err
@@ -129,26 +130,34 @@ func (db *Database) Upgrade() error {
version++ version++
continue continue
} }
db.Log.DoUpgrade(logVersion, upgradeItem.upgradesTo, upgradeItem.message) db.Log.DoUpgrade(logVersion, upgradeItem.upgradesTo, upgradeItem.message, upgradeItem.transaction)
var tx Transaction var tx Transaction
var upgradeConn Execable
if upgradeItem.transaction {
tx, err = db.Begin() tx, err = db.Begin()
if err != nil { if err != nil {
return err return err
} }
err = upgradeItem.fn(tx, db) upgradeConn = tx
} else {
upgradeConn = db
}
err = upgradeItem.fn(upgradeConn, db)
if err != nil { if err != nil {
return err return err
} }
version = upgradeItem.upgradesTo version = upgradeItem.upgradesTo
logVersion = version logVersion = version
err = db.setVersion(tx, version) err = db.setVersion(upgradeConn, version)
if err != nil { if err != nil {
return err return err
} }
if tx != nil {
err = tx.Commit() err = tx.Commit()
if err != nil { if err != nil {
return err return err
} }
} }
}
return nil return nil
} }

View File

@@ -29,14 +29,14 @@ func (ut *UpgradeTable) extend(toSize int) {
} }
} }
func (ut *UpgradeTable) Register(from, to int, message string, fn upgradeFunc) { func (ut *UpgradeTable) Register(from, to int, message string, txn bool, fn upgradeFunc) {
if from < 0 { if from < 0 {
from += to from += to
} }
if from < 0 { if from < 0 {
panic("invalid from value in UpgradeTable.Register() call") panic("invalid from value in UpgradeTable.Register() call")
} }
upg := upgrade{message: message, fn: fn, upgradesTo: to} upg := upgrade{message: message, fn: fn, upgradesTo: to, transaction: txn}
if len(*ut) == from { if len(*ut) == from {
*ut = append(*ut, upg) *ut = append(*ut, upg)
return return
@@ -57,7 +57,14 @@ func (ut *UpgradeTable) Register(from, to int, message string, fn upgradeFunc) {
// -- v1: Message // -- v1: Message
var upgradeHeaderRegex = regexp.MustCompile(`^-- (?:v(\d+) -> )?v(\d+): (.+)$`) var upgradeHeaderRegex = regexp.MustCompile(`^-- (?:v(\d+) -> )?v(\d+): (.+)$`)
func parseFileHeader(file []byte) (from, to int, message string, lines [][]byte, err error) { // To disable wrapping the upgrade in a single transaction, put `--transaction: off` on the second line.
//
// -- v5: Upgrade without transaction
// -- transaction: off
// // do dangerous stuff
var transactionDisableRegex = regexp.MustCompile(`^-- transaction: (\w*)`)
func parseFileHeader(file []byte) (from, to int, message string, txn bool, lines [][]byte, err error) {
lines = bytes.Split(file, []byte("\n")) lines = bytes.Split(file, []byte("\n"))
if len(lines) < 2 { if len(lines) < 2 {
err = errors.New("upgrade file too short") err = errors.New("upgrade file too short")
@@ -81,6 +88,15 @@ func parseFileHeader(file []byte) (from, to int, message string, lines [][]byte,
from = -1 from = -1
} }
message = string(match[3]) message = string(match[3])
txn = true
match = transactionDisableRegex.FindSubmatch(lines[0])
if match != nil {
lines = lines[1:]
if string(match[1]) != "off" {
err = fmt.Errorf("invalid value %q for transaction flag", match[1])
}
txn = false
}
} }
return return
} }
@@ -163,7 +179,7 @@ func (db *Database) filterSQLUpgrade(lines [][]byte) (string, error) {
} }
func sqlUpgradeFunc(fileName string, lines [][]byte) upgradeFunc { func sqlUpgradeFunc(fileName string, lines [][]byte) upgradeFunc {
return func(tx Transaction, db *Database) error { return func(tx Execable, db *Database) error {
if skip, err := db.parseDialectFilter(lines[0]); err == nil && skip == skipNextLine { if skip, err := db.parseDialectFilter(lines[0]); err == nil && skip == skipNextLine {
return nil return nil
} else if upgradeSQL, err := db.filterSQLUpgrade(lines); err != nil { } else if upgradeSQL, err := db.filterSQLUpgrade(lines); err != nil {
@@ -176,7 +192,7 @@ func sqlUpgradeFunc(fileName string, lines [][]byte) upgradeFunc {
} }
func splitSQLUpgradeFunc(sqliteData, postgresData string) upgradeFunc { func splitSQLUpgradeFunc(sqliteData, postgresData string) upgradeFunc {
return func(tx Transaction, database *Database) (err error) { return func(tx Execable, database *Database) (err error) {
switch database.Dialect { switch database.Dialect {
case SQLite: case SQLite:
_, err = tx.Exec(sqliteData) _, err = tx.Exec(sqliteData)
@@ -189,7 +205,7 @@ func splitSQLUpgradeFunc(sqliteData, postgresData string) upgradeFunc {
} }
} }
func parseSplitSQLUpgrade(name string, fs fullFS, skipNames map[string]struct{}) (from, to int, message string, fn upgradeFunc) { func parseSplitSQLUpgrade(name string, fs fullFS, skipNames map[string]struct{}) (from, to int, message string, txn bool, fn upgradeFunc) {
postgresName := fmt.Sprintf("%s.postgres.sql", name) postgresName := fmt.Sprintf("%s.postgres.sql", name)
sqliteName := fmt.Sprintf("%s.sqlite.sql", name) sqliteName := fmt.Sprintf("%s.sqlite.sql", name)
skipNames[postgresName] = struct{}{} skipNames[postgresName] = struct{}{}
@@ -202,11 +218,11 @@ func parseSplitSQLUpgrade(name string, fs fullFS, skipNames map[string]struct{})
if err != nil { if err != nil {
panic(err) panic(err)
} }
from, to, message, _, err = parseFileHeader(postgresData) from, to, message, txn, _, err = parseFileHeader(postgresData)
if err != nil { if err != nil {
panic(fmt.Errorf("failed to parse header in %s: %w", postgresName, err)) panic(fmt.Errorf("failed to parse header in %s: %w", postgresName, err))
} }
sqliteFrom, sqliteTo, sqliteMessage, _, err := parseFileHeader(sqliteData) sqliteFrom, sqliteTo, sqliteMessage, sqliteTxn, _, err := parseFileHeader(sqliteData)
if err != nil { if err != nil {
panic(fmt.Errorf("failed to parse header in %s: %w", sqliteName, err)) panic(fmt.Errorf("failed to parse header in %s: %w", sqliteName, err))
} }
@@ -214,6 +230,8 @@ func parseSplitSQLUpgrade(name string, fs fullFS, skipNames map[string]struct{})
panic(fmt.Errorf("mismatching versions in postgres and sqlite versions of %s: %d/%d -> %d/%d", name, from, sqliteFrom, to, sqliteTo)) panic(fmt.Errorf("mismatching versions in postgres and sqlite versions of %s: %d/%d -> %d/%d", name, from, sqliteFrom, to, sqliteTo))
} else if message != sqliteMessage { } else if message != sqliteMessage {
panic(fmt.Errorf("mismatching message in postgres and sqlite versions of %s: %q != %q", name, message, sqliteMessage)) panic(fmt.Errorf("mismatching message in postgres and sqlite versions of %s: %q != %q", name, message, sqliteMessage))
} else if txn != sqliteTxn {
panic(fmt.Errorf("mismatching transaction flag in postgres and sqlite versions of %s: %t != %t", name, txn, sqliteTxn))
} }
fn = splitSQLUpgradeFunc(string(sqliteData), string(postgresData)) fn = splitSQLUpgradeFunc(string(sqliteData), string(postgresData))
return return
@@ -242,14 +260,14 @@ func (ut *UpgradeTable) RegisterFSPath(fs fullFS, dir string) {
} else if _, skip := skipNames[file.Name()]; skip { } else if _, skip := skipNames[file.Name()]; skip {
// also do nothing // also do nothing
} else if splitName := splitFileNameRegex.FindStringSubmatch(file.Name()); splitName != nil { } else if splitName := splitFileNameRegex.FindStringSubmatch(file.Name()); splitName != nil {
from, to, message, fn := parseSplitSQLUpgrade(splitName[1], fs, skipNames) from, to, message, txn, fn := parseSplitSQLUpgrade(splitName[1], fs, skipNames)
ut.Register(from, to, message, fn) ut.Register(from, to, message, txn, fn)
} else if data, err := fs.ReadFile(filepath.Join(dir, file.Name())); err != nil { } else if data, err := fs.ReadFile(filepath.Join(dir, file.Name())); err != nil {
panic(err) panic(err)
} else if from, to, message, lines, err := parseFileHeader(data); err != nil { } else if from, to, message, txn, lines, err := parseFileHeader(data); err != nil {
panic(fmt.Errorf("failed to parse header in %s: %w", file.Name(), err)) panic(fmt.Errorf("failed to parse header in %s: %w", file.Name(), err))
} else { } else {
ut.Register(from, to, message, sqlUpgradeFunc(file.Name(), lines)) ut.Register(from, to, message, txn, sqlUpgradeFunc(file.Name(), lines))
} }
} }
} }

View File

@@ -0,0 +1,86 @@
// Copyright (c) 2022 Tulir Asokan
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
package jsontime
import (
"encoding/json"
"time"
)
// UM wraps the given time.Time in a UnixMilli value.
func UM(time time.Time) UnixMilli {
	return UnixMilli{time}
}

// UMInt converts a millisecond unix timestamp into a UnixMilli value.
func UMInt(ts int64) UnixMilli {
	return UnixMilli{time.UnixMilli(ts)}
}

// UnixMilliNow returns the current time as a UnixMilli value.
func UnixMilliNow() UnixMilli {
	return UnixMilli{time.Now()}
}

// UnixMilli embeds time.Time and serializes it in JSON as a millisecond
// unix timestamp. The zero time is represented by the number 0.
type UnixMilli struct {
	time.Time
}

// MarshalJSON encodes the time as a millisecond unix timestamp, or as the
// literal 0 for the zero time.
func (um UnixMilli) MarshalJSON() ([]byte, error) {
	if um.IsZero() {
		return []byte("0"), nil
	}
	return json.Marshal(um.UnixMilli())
}

// UnmarshalJSON decodes a millisecond unix timestamp; the number 0 is
// decoded as the zero time.
func (um *UnixMilli) UnmarshalJSON(data []byte) error {
	var ms int64
	if err := json.Unmarshal(data, &ms); err != nil {
		return err
	}
	if ms == 0 {
		um.Time = time.Time{}
		return nil
	}
	um.Time = time.UnixMilli(ms)
	return nil
}
// U wraps the given time.Time in a Unix value.
func U(time time.Time) Unix {
	return Unix{time}
}

// UInt converts a second-precision unix timestamp into a Unix value.
func UInt(ts int64) Unix {
	return Unix{time.Unix(ts, 0)}
}

// UnixNow returns the current time as a Unix value.
func UnixNow() Unix {
	return Unix{time.Now()}
}

// Unix embeds time.Time and serializes it in JSON as a second-precision
// unix timestamp. The zero time is represented by the number 0.
type Unix struct {
	time.Time
}

// MarshalJSON encodes the time as a unix timestamp in seconds, or as the
// literal 0 for the zero time.
func (u Unix) MarshalJSON() ([]byte, error) {
	if u.IsZero() {
		return []byte("0"), nil
	}
	return json.Marshal(u.Unix())
}

// UnmarshalJSON decodes a unix timestamp in seconds; the number 0 is
// decoded as the zero time.
func (u *Unix) UnmarshalJSON(data []byte) error {
	var secs int64
	if err := json.Unmarshal(data, &secs); err != nil {
		return err
	}
	if secs == 0 {
		u.Time = time.Time{}
		return nil
	}
	u.Time = time.Unix(secs, 0)
	return nil
}

View File

@@ -1,5 +1,5 @@
package mautrix package mautrix
const Version = "v0.12.2" const Version = "v0.12.3"
var DefaultUserAgent = "mautrix-go/" + Version var DefaultUserAgent = "mautrix-go/" + Version

11
vendor/modules.txt vendored
View File

@@ -60,7 +60,7 @@ github.com/mattn/go-isatty
# github.com/mattn/go-runewidth v0.0.12 # github.com/mattn/go-runewidth v0.0.12
## explicit; go 1.9 ## explicit; go 1.9
github.com/mattn/go-runewidth github.com/mattn/go-runewidth
# github.com/mattn/go-sqlite3 v1.14.15 # github.com/mattn/go-sqlite3 v1.14.16
## explicit; go 1.16 ## explicit; go 1.16
github.com/mattn/go-sqlite3 github.com/mattn/go-sqlite3
# github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a # github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a
@@ -101,7 +101,7 @@ github.com/tidwall/pretty
# github.com/tidwall/sjson v1.2.5 # github.com/tidwall/sjson v1.2.5
## explicit; go 1.14 ## explicit; go 1.14
github.com/tidwall/sjson github.com/tidwall/sjson
# github.com/yuin/goldmark v1.5.2 # github.com/yuin/goldmark v1.5.3
## explicit; go 1.18 ## explicit; go 1.18
github.com/yuin/goldmark github.com/yuin/goldmark
github.com/yuin/goldmark/ast github.com/yuin/goldmark/ast
@@ -130,12 +130,12 @@ gitlab.com/etke.cc/go/trysmtp
# gitlab.com/etke.cc/go/validator v1.0.3 # gitlab.com/etke.cc/go/validator v1.0.3
## explicit; go 1.18 ## explicit; go 1.18
gitlab.com/etke.cc/go/validator gitlab.com/etke.cc/go/validator
# gitlab.com/etke.cc/linkpearl v0.0.0-20221115164843-97f1e49414d9 # gitlab.com/etke.cc/linkpearl v0.0.0-20221116205701-65547c5608e6
## explicit; go 1.18 ## explicit; go 1.18
gitlab.com/etke.cc/linkpearl gitlab.com/etke.cc/linkpearl
gitlab.com/etke.cc/linkpearl/config gitlab.com/etke.cc/linkpearl/config
gitlab.com/etke.cc/linkpearl/store gitlab.com/etke.cc/linkpearl/store
# golang.org/x/crypto v0.2.0 # golang.org/x/crypto v0.3.0
## explicit; go 1.17 ## explicit; go 1.17
golang.org/x/crypto/argon2 golang.org/x/crypto/argon2
golang.org/x/crypto/blake2b golang.org/x/crypto/blake2b
@@ -180,7 +180,7 @@ gopkg.in/yaml.v3
# maunium.net/go/maulogger/v2 v2.3.2 # maunium.net/go/maulogger/v2 v2.3.2
## explicit; go 1.11 ## explicit; go 1.11
maunium.net/go/maulogger/v2 maunium.net/go/maulogger/v2
# maunium.net/go/mautrix v0.12.2 # maunium.net/go/mautrix v0.12.3
## explicit; go 1.18 ## explicit; go 1.18
maunium.net/go/mautrix maunium.net/go/mautrix
maunium.net/go/mautrix/appservice maunium.net/go/mautrix/appservice
@@ -202,3 +202,4 @@ maunium.net/go/mautrix/util
maunium.net/go/mautrix/util/base58 maunium.net/go/mautrix/util/base58
maunium.net/go/mautrix/util/configupgrade maunium.net/go/mautrix/util/configupgrade
maunium.net/go/mautrix/util/dbutil maunium.net/go/mautrix/util/dbutil
maunium.net/go/mautrix/util/jsontime