commit 70823a02e2 (tags/0.0.1)
Author: Vladimir Smagin, 4 weeks ago
Commit message: import

56 changed files with 17206 additions and 0 deletions
  1. .gitignore (+1, -0)
  2. README.md (+24, -0)
  3. config.example.yaml (+4, -0)
  4. go.mod (+8, -0)
  5. go.sum (+6, -0)
  6. main.go (+128, -0)
  7. vendor/github.com/lib/pq/.gitignore (+4, -0)
  8. vendor/github.com/lib/pq/.travis.sh (+73, -0)
  9. vendor/github.com/lib/pq/.travis.yml (+44, -0)
  10. vendor/github.com/lib/pq/LICENSE.md (+8, -0)
  11. vendor/github.com/lib/pq/README.md (+33, -0)
  12. vendor/github.com/lib/pq/TESTS.md (+33, -0)
  13. vendor/github.com/lib/pq/array.go (+756, -0)
  14. vendor/github.com/lib/pq/buf.go (+91, -0)
  15. vendor/github.com/lib/pq/conn.go (+1996, -0)
  16. vendor/github.com/lib/pq/conn_go18.go (+149, -0)
  17. vendor/github.com/lib/pq/connector.go (+115, -0)
  18. vendor/github.com/lib/pq/copy.go (+284, -0)
  19. vendor/github.com/lib/pq/doc.go (+263, -0)
  20. vendor/github.com/lib/pq/encode.go (+622, -0)
  21. vendor/github.com/lib/pq/error.go (+515, -0)
  22. vendor/github.com/lib/pq/go.mod (+3, -0)
  23. vendor/github.com/lib/pq/krb.go (+27, -0)
  24. vendor/github.com/lib/pq/notice.go (+71, -0)
  25. vendor/github.com/lib/pq/notify.go (+858, -0)
  26. vendor/github.com/lib/pq/oid/doc.go (+6, -0)
  27. vendor/github.com/lib/pq/oid/types.go (+343, -0)
  28. vendor/github.com/lib/pq/rows.go (+93, -0)
  29. vendor/github.com/lib/pq/scram/scram.go (+264, -0)
  30. vendor/github.com/lib/pq/ssl.go (+175, -0)
  31. vendor/github.com/lib/pq/ssl_permissions.go (+20, -0)
  32. vendor/github.com/lib/pq/ssl_windows.go (+9, -0)
  33. vendor/github.com/lib/pq/url.go (+76, -0)
  34. vendor/github.com/lib/pq/user_posix.go (+24, -0)
  35. vendor/github.com/lib/pq/user_windows.go (+27, -0)
  36. vendor/github.com/lib/pq/uuid.go (+23, -0)
  37. vendor/gopkg.in/yaml.v2/.travis.yml (+16, -0)
  38. vendor/gopkg.in/yaml.v2/LICENSE (+201, -0)
  39. vendor/gopkg.in/yaml.v2/LICENSE.libyaml (+31, -0)
  40. vendor/gopkg.in/yaml.v2/NOTICE (+13, -0)
  41. vendor/gopkg.in/yaml.v2/README.md (+133, -0)
  42. vendor/gopkg.in/yaml.v2/apic.go (+740, -0)
  43. vendor/gopkg.in/yaml.v2/decode.go (+815, -0)
  44. vendor/gopkg.in/yaml.v2/emitterc.go (+1685, -0)
  45. vendor/gopkg.in/yaml.v2/encode.go (+390, -0)
  46. vendor/gopkg.in/yaml.v2/go.mod (+5, -0)
  47. vendor/gopkg.in/yaml.v2/parserc.go (+1095, -0)
  48. vendor/gopkg.in/yaml.v2/readerc.go (+412, -0)
  49. vendor/gopkg.in/yaml.v2/resolve.go (+258, -0)
  50. vendor/gopkg.in/yaml.v2/scannerc.go (+2711, -0)
  51. vendor/gopkg.in/yaml.v2/sorter.go (+113, -0)
  52. vendor/gopkg.in/yaml.v2/writerc.go (+26, -0)
  53. vendor/gopkg.in/yaml.v2/yaml.go (+466, -0)
  54. vendor/gopkg.in/yaml.v2/yamlh.go (+739, -0)
  55. vendor/gopkg.in/yaml.v2/yamlprivateh.go (+173, -0)
  56. vendor/modules.txt (+8, -0)

.gitignore (+1, -0)

@@ -0,0 +1 @@
config.yaml

README.md (+24, -0)

@@ -0,0 +1,24 @@
# Compare PostgreSQL tables

You have a master server and one or more slaves, and you want to be sure that all tables are in sync.

```yaml
credentials:
  master: postgres://username:password@pg-master.server.ru:5433/mydatabase?sslmode=disable
  slave1: postgres://username:password@pg-slave1.server.ru:5432/mydatabase?sslmode=disable
  slave2: postgres://username:password@pg-slave2.server.ru:5435/mydatabase?sslmode=disable
```

Sample output, table `ga` is out of sync on `slave2`:

```log
2020/07/16 05:45:45 Loaded config: {map[master:postgres://username:password@pg-master.server.ru:5433/mydatabase?sslmode=disable postgres://username:password@pg-slave1.server.ru:5432/mydatabase?sslmode=disable postgres://username:password@pg-slave2.server.ru:5435/mydatabase?sslmode=disable]}
2020/07/16 05:45:45 master successfully connected!
2020/07/16 05:45:46 slave1 successfully connected!
2020/07/16 05:45:46 slave2 successfully connected!
2020/07/16 05:45:46 Table name: ga
2020/07/16 05:46:07 Postgres name: master Count: 25381158
2020/07/16 05:45:51 Postgres name: slave1 Count: 25381158
2020/07/16 05:46:06 Postgres name: slave2 Count: 25342939
2020/07/16 05:46:07 ------------
```
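
Note: the README does not spell out what "compare" means here; the tool simply counts rows per table on every configured server and prints the counts side by side (see `getCount` in `main.go` below). A minimal, self-contained sketch of that check; the connection string and the table name `ga` are placeholders taken from the sample output above:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

// rowCount mirrors the per-table check the tool runs on each server:
// a plain SELECT count(*) against the double-quoted table name.
func rowCount(db *sql.DB, table string) (int64, error) {
	var n int64
	err := db.QueryRow(fmt.Sprintf("SELECT count(*) FROM \"%v\";", table)).Scan(&n)
	return n, err
}

func main() {
	// DSN format matches the credentials entries in config.yaml.
	db, err := sql.Open("postgres", "postgres://username:password@localhost:5432/mydatabase?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	n, err := rowCount(db, "ga")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("rows in ga:", n)
}
```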

config.example.yaml (+4, -0)

@@ -0,0 +1,4 @@
credentials:
  master: postgres://username:password@pg-master.server.ru:5433/mydatabase?sslmode=disable
  slave1: postgres://username:password@pg-slave1.server.ru:5432/mydatabase?sslmode=disable
  slave2: postgres://username:password@pg-slave2.server.ru:5435/mydatabase?sslmode=disable

go.mod (+8, -0)

@@ -0,0 +1,8 @@
module git.blindage.org/21h/compare-pg

go 1.14

require (
	github.com/lib/pq v1.7.0
	gopkg.in/yaml.v2 v2.3.0
)

go.sum (+6, -0)

@@ -0,0 +1,6 @@
github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

main.go (+128, -0)

@@ -0,0 +1,128 @@
package main

import (
	"database/sql"
	"fmt"
	"io/ioutil"
	"log"

	_ "github.com/lib/pq"
	"gopkg.in/yaml.v2"
)

type config struct {
	Credentials map[string]string `yaml:"credentials"`
}

type pgConnections struct {
	Handlers map[string]*sql.DB
}

func loadConfig() config {
	var conf config

	yamldata, err := ioutil.ReadFile("config.yaml")
	if err != nil {
		log.Fatalf("Error opening file %v", err)
	}

	marshErr := yaml.Unmarshal(yamldata, &conf)
	if marshErr != nil {
		log.Fatalf("Config file malformed: %v", marshErr)
	}
	log.Println("Loaded config:", conf)
	return conf
}

// getCount returns the number of rows in tableName, or 0 if the count
// could not be read.
func getCount(db *sql.DB, tableName string) int {
	sqlStatement := fmt.Sprintf("SELECT count(*) FROM \"%v\";", tableName)
	var cnt int
	row := db.QueryRow(sqlStatement)
	switch err := row.Scan(&cnt); err {
	case sql.ErrNoRows:
		log.Println("No rows were returned!")
	case nil:
		return cnt
	default:
		log.Println("Error", err, "DB", db)
	}
	return 0
}

// getTableNames lists all user tables, skipping the system schemas.
func getTableNames(db *sql.DB) []string {
	rows, err := db.Query("SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname != 'pg_catalog' AND schemaname != 'information_schema';")
	resultSet := []string{}
	if err != nil {
		log.Fatalln(err)
	}
	defer rows.Close()
	for rows.Next() {
		var tableName string
		err = rows.Scan(&tableName)
		if err != nil {
			log.Fatalln(err)
		}
		resultSet = append(resultSet, tableName)
	}
	err = rows.Err()
	if err != nil {
		log.Fatalln(err)
	}
	return resultSet
}

func inList(list []string, value string) bool {
	for _, val := range list {
		if val == value {
			return true
		}
	}
	return false
}

func main() {

	// Config
	conf := loadConfig()
	pg := pgConnections{}

	// Connect to DBs; connections are closed when main returns
	pgConfigs := make(map[string]*sql.DB)
	for pgName, pgConnectString := range conf.Credentials {
		db, err := sql.Open("postgres", pgConnectString)
		if err != nil {
			log.Fatalln(err)
		}
		defer db.Close()

		err = db.Ping()
		if err != nil {
			log.Fatalln(err)
		}
		log.Println(pgName, "successfully connected!")
		pgConfigs[pgName] = db
	}
	pg.Handlers = pgConfigs

	// Now get tables list
	tablesList := []string{}
	for _, handlerDB := range pg.Handlers {
		dbList := getTableNames(handlerDB)
		for _, tableName := range dbList {
			if !inList(tablesList, tableName) {
				tablesList = append(tablesList, tableName)
			}
		}
	}

	// Compare records count
	for _, tableName := range tablesList {
		log.Println("Table name:", tableName)
		for pgName, handlerDB := range pg.Handlers {
			log.Println("Postgres name:", pgName, "Count:", getCount(handlerDB, tableName))
		}
		log.Println("------------")
	}
}

vendor/github.com/lib/pq/.gitignore (+4, -0)

@@ -0,0 +1,4 @@
.db
*.test
*~
*.swp

vendor/github.com/lib/pq/.travis.sh (+73, -0)

@@ -0,0 +1,73 @@
#!/bin/bash

set -eu

client_configure() {
sudo chmod 600 $PQSSLCERTTEST_PATH/postgresql.key
}

pgdg_repository() {
local sourcelist='sources.list.d/postgresql.list'

curl -sS 'https://www.postgresql.org/media/keys/ACCC4CF8.asc' | sudo apt-key add -
echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION | sudo tee "/etc/apt/$sourcelist"
sudo apt-get -o Dir::Etc::sourcelist="$sourcelist" -o Dir::Etc::sourceparts='-' -o APT::Get::List-Cleanup='0' update
}

postgresql_configure() {
sudo tee /etc/postgresql/$PGVERSION/main/pg_hba.conf > /dev/null <<-config
local all all trust
hostnossl all pqgossltest 127.0.0.1/32 reject
hostnossl all pqgosslcert 127.0.0.1/32 reject
hostssl all pqgossltest 127.0.0.1/32 trust
hostssl all pqgosslcert 127.0.0.1/32 cert
host all all 127.0.0.1/32 trust
hostnossl all pqgossltest ::1/128 reject
hostnossl all pqgosslcert ::1/128 reject
hostssl all pqgossltest ::1/128 trust
hostssl all pqgosslcert ::1/128 cert
host all all ::1/128 trust
config

xargs sudo install -o postgres -g postgres -m 600 -t /var/lib/postgresql/$PGVERSION/main/ <<-certificates
certs/root.crt
certs/server.crt
certs/server.key
certificates

sort -VCu <<-versions ||
$PGVERSION
9.2
versions
sudo tee -a /etc/postgresql/$PGVERSION/main/postgresql.conf > /dev/null <<-config
ssl_ca_file = 'root.crt'
ssl_cert_file = 'server.crt'
ssl_key_file = 'server.key'
config

echo 127.0.0.1 postgres | sudo tee -a /etc/hosts > /dev/null

sudo service postgresql restart
}

postgresql_install() {
xargs sudo apt-get -y -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confnew' install <<-packages
postgresql-$PGVERSION
postgresql-server-dev-$PGVERSION
postgresql-contrib-$PGVERSION
packages
}

postgresql_uninstall() {
sudo service postgresql stop
xargs sudo apt-get -y --purge remove <<-packages
libpq-dev
libpq5
postgresql
postgresql-client-common
postgresql-common
packages
sudo rm -rf /var/lib/postgresql
}

$1

vendor/github.com/lib/pq/.travis.yml (+44, -0)

@@ -0,0 +1,44 @@
language: go

go:
- 1.13.x
- 1.14.x
- master

sudo: true

env:
global:
- PGUSER=postgres
- PQGOSSLTESTS=1
- PQSSLCERTTEST_PATH=$PWD/certs
- PGHOST=127.0.0.1
matrix:
- PGVERSION=10
- PGVERSION=9.6
- PGVERSION=9.5
- PGVERSION=9.4

before_install:
- ./.travis.sh postgresql_uninstall
- ./.travis.sh pgdg_repository
- ./.travis.sh postgresql_install
- ./.travis.sh postgresql_configure
- ./.travis.sh client_configure
- go get golang.org/x/tools/cmd/goimports
- go get golang.org/x/lint/golint
- GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2020.1.3

before_script:
- createdb pqgotest
- createuser -DRS pqgossltest
- createuser -DRS pqgosslcert

script:
- >
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
- go vet ./...
- staticcheck -go 1.13 ./...
- golint ./...
- PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
- PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...

vendor/github.com/lib/pq/LICENSE.md (+8, -0)

@@ -0,0 +1,8 @@
Copyright (c) 2011-2013, 'pq' Contributors
Portions Copyright (C) 2011 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/lib/pq/README.md (+33, -0)

@@ -0,0 +1,33 @@
# pq - A pure Go postgres driver for Go's database/sql package

[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc)

## Install

go get github.com/lib/pq

## Features

* SSL
* Handles bad connections for `database/sql`
* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`)
* Scan binary blobs correctly (i.e. `bytea`)
* Package for `hstore` support
* COPY FROM support
* pq.ParseURL for converting urls to connection strings for sql.Open.
* Many libpq compatible environment variables
* Unix socket support
* Notifications: `LISTEN`/`NOTIFY`
* pgpass support

## Optional Features

* GSS (Kerberos) auth (to use, see GoDoc)

## Tests

`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.

## Status

This package is effectively in maintenance mode and is not actively developed. Small patches and features are only rarely reviewed and merged. We recommend using [pgx](https://github.com/jackc/pgx) which is actively maintained.
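
Note: the Install section above stops at `go get`. A minimal hedged usage sketch (the connection string is a placeholder), following the same pattern the package's doc.go documents:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var one int
	if err := db.QueryRow("SELECT 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
	log.Println("result:", one)
}
```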

vendor/github.com/lib/pq/TESTS.md (+33, -0)

@@ -0,0 +1,33 @@
# Tests

## Running Tests

`go test` is used for testing. A running PostgreSQL
server is required, with the ability to log in. The
database to connect to test with is "pqgotest," on
"localhost" but these can be overridden using [environment
variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).

Example:

PGHOST=/run/postgresql go test

## Benchmarks

A benchmark suite can be run as part of the tests:

go test -bench .

## Example setup (Docker)

Run a postgres container:

```
docker run --expose 5432:5432 postgres
```

Run tests:

```
PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
```

vendor/github.com/lib/pq/array.go (+756, -0)

@@ -0,0 +1,756 @@
package pq

import (
"bytes"
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"reflect"
"strconv"
"strings"
)

var typeByteSlice = reflect.TypeOf([]byte{})
var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem()

// Array returns the optimal driver.Valuer and sql.Scanner for an array or
// slice of any dimension.
//
// For example:
// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401}))
//
// var x []sql.NullInt64
// db.QueryRow('SELECT ARRAY[235, 401]').Scan(pq.Array(&x))
//
// Scanning multi-dimensional arrays is not supported. Arrays where the lower
// bound is not one (such as `[0:0]={1}') are not supported.
func Array(a interface{}) interface {
driver.Valuer
sql.Scanner
} {
switch a := a.(type) {
case []bool:
return (*BoolArray)(&a)
case []float64:
return (*Float64Array)(&a)
case []int64:
return (*Int64Array)(&a)
case []string:
return (*StringArray)(&a)

case *[]bool:
return (*BoolArray)(a)
case *[]float64:
return (*Float64Array)(a)
case *[]int64:
return (*Int64Array)(a)
case *[]string:
return (*StringArray)(a)
}

return GenericArray{a}
}

// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner
// to override the array delimiter used by GenericArray.
type ArrayDelimiter interface {
// ArrayDelimiter returns the delimiter character(s) for this element's type.
ArrayDelimiter() string
}

// BoolArray represents a one-dimensional array of the PostgreSQL boolean type.
type BoolArray []bool

// Scan implements the sql.Scanner interface.
func (a *BoolArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}

return fmt.Errorf("pq: cannot convert %T to BoolArray", src)
}

func (a *BoolArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "BoolArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(BoolArray, len(elems))
for i, v := range elems {
if len(v) != 1 {
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
switch v[0] {
case 't':
b[i] = true
case 'f':
b[i] = false
default:
return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v)
}
}
*a = b
}
return nil
}

// Value implements the driver.Valuer interface.
func (a BoolArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}

if n := len(a); n > 0 {
// There will be exactly two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1+2*n)

for i := 0; i < n; i++ {
b[2*i] = ','
if a[i] {
b[1+2*i] = 't'
} else {
b[1+2*i] = 'f'
}
}

b[0] = '{'
b[2*n] = '}'

return string(b), nil
}

return "{}", nil
}

// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type.
type ByteaArray [][]byte

// Scan implements the sql.Scanner interface.
func (a *ByteaArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}

return fmt.Errorf("pq: cannot convert %T to ByteaArray", src)
}

func (a *ByteaArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "ByteaArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(ByteaArray, len(elems))
for i, v := range elems {
b[i], err = parseBytea(v)
if err != nil {
return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error())
}
}
*a = b
}
return nil
}

// Value implements the driver.Valuer interface. It uses the "hex" format which
// is only supported on PostgreSQL 9.0 or newer.
func (a ByteaArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}

if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// 3*N bytes of hex formatting, and N-1 bytes of delimiters.
size := 1 + 6*n
for _, x := range a {
size += hex.EncodedLen(len(x))
}

b := make([]byte, size)

for i, s := 0, b; i < n; i++ {
o := copy(s, `,"\\x`)
o += hex.Encode(s[o:], a[i])
s[o] = '"'
s = s[o+1:]
}

b[0] = '{'
b[size-1] = '}'

return string(b), nil
}

return "{}", nil
}

// Float64Array represents a one-dimensional array of the PostgreSQL double
// precision type.
type Float64Array []float64

// Scan implements the sql.Scanner interface.
func (a *Float64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}

return fmt.Errorf("pq: cannot convert %T to Float64Array", src)
}

func (a *Float64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Float64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Float64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseFloat(string(v), 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}

// Value implements the driver.Valuer interface.
func (a Float64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}

if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'

b = strconv.AppendFloat(b, a[0], 'f', -1, 64)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendFloat(b, a[i], 'f', -1, 64)
}

return string(append(b, '}')), nil
}

return "{}", nil
}

// GenericArray implements the driver.Valuer and sql.Scanner interfaces for
// an array or slice of any dimension.
type GenericArray struct{ A interface{} }

func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) {
var assign func([]byte, reflect.Value) error
var del = ","

// TODO calculate the assign function for other types
// TODO repeat this section on the element type of arrays or slices (multidimensional)
{
if reflect.PtrTo(rt).Implements(typeSQLScanner) {
// dest is always addressable because it is an element of a slice.
assign = func(src []byte, dest reflect.Value) (err error) {
ss := dest.Addr().Interface().(sql.Scanner)
if src == nil {
err = ss.Scan(nil)
} else {
err = ss.Scan(src)
}
return
}
goto FoundType
}

assign = func([]byte, reflect.Value) error {
return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt)
}
}

FoundType:

if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}

return rt, assign, del
}

// Scan implements the sql.Scanner interface.
func (a GenericArray) Scan(src interface{}) error {
dpv := reflect.ValueOf(a.A)
switch {
case dpv.Kind() != reflect.Ptr:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
case dpv.IsNil():
return fmt.Errorf("pq: destination %T is nil", a.A)
}

dv := dpv.Elem()
switch dv.Kind() {
case reflect.Slice:
case reflect.Array:
default:
return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A)
}

switch src := src.(type) {
case []byte:
return a.scanBytes(src, dv)
case string:
return a.scanBytes([]byte(src), dv)
case nil:
if dv.Kind() == reflect.Slice {
dv.Set(reflect.Zero(dv.Type()))
return nil
}
}

return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type())
}

func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error {
dtype, assign, del := a.evaluateDestination(dv.Type().Elem())
dims, elems, err := parseArray(src, []byte(del))
if err != nil {
return err
}

// TODO allow multidimensional

if len(dims) > 1 {
return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented",
strings.Replace(fmt.Sprint(dims), " ", "][", -1))
}

// Treat a zero-dimensional array like an array with a single dimension of zero.
if len(dims) == 0 {
dims = append(dims, 0)
}

for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() {
switch rt.Kind() {
case reflect.Slice:
case reflect.Array:
if rt.Len() != dims[i] {
return fmt.Errorf("pq: cannot convert ARRAY%s to %s",
strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type())
}
default:
// TODO handle multidimensional
}
}

values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems))
for i, e := range elems {
if err := assign(e, values.Index(i)); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}

// TODO handle multidimensional

switch dv.Kind() {
case reflect.Slice:
dv.Set(values.Slice(0, dims[0]))
case reflect.Array:
for i := 0; i < dims[0]; i++ {
dv.Index(i).Set(values.Index(i))
}
}

return nil
}

// Value implements the driver.Valuer interface.
func (a GenericArray) Value() (driver.Value, error) {
if a.A == nil {
return nil, nil
}

rv := reflect.ValueOf(a.A)

switch rv.Kind() {
case reflect.Slice:
if rv.IsNil() {
return nil, nil
}
case reflect.Array:
default:
return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A)
}

if n := rv.Len(); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 0, 1+2*n)

b, _, err := appendArray(b, rv, n)
return string(b), err
}

return "{}", nil
}

// Int64Array represents a one-dimensional array of the PostgreSQL integer types.
type Int64Array []int64

// Scan implements the sql.Scanner interface.
func (a *Int64Array) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}

return fmt.Errorf("pq: cannot convert %T to Int64Array", src)
}

func (a *Int64Array) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "Int64Array")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(Int64Array, len(elems))
for i, v := range elems {
if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil {
return fmt.Errorf("pq: parsing array element index %d: %v", i, err)
}
}
*a = b
}
return nil
}

// Value implements the driver.Valuer interface.
func (a Int64Array) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}

if n := len(a); n > 0 {
// There will be at least two curly brackets, N bytes of values,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+2*n)
b[0] = '{'

b = strconv.AppendInt(b, a[0], 10)
for i := 1; i < n; i++ {
b = append(b, ',')
b = strconv.AppendInt(b, a[i], 10)
}

return string(append(b, '}')), nil
}

return "{}", nil
}

// StringArray represents a one-dimensional array of the PostgreSQL character types.
type StringArray []string

// Scan implements the sql.Scanner interface.
func (a *StringArray) Scan(src interface{}) error {
switch src := src.(type) {
case []byte:
return a.scanBytes(src)
case string:
return a.scanBytes([]byte(src))
case nil:
*a = nil
return nil
}

return fmt.Errorf("pq: cannot convert %T to StringArray", src)
}

func (a *StringArray) scanBytes(src []byte) error {
elems, err := scanLinearArray(src, []byte{','}, "StringArray")
if err != nil {
return err
}
if *a != nil && len(elems) == 0 {
*a = (*a)[:0]
} else {
b := make(StringArray, len(elems))
for i, v := range elems {
if b[i] = string(v); v == nil {
return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i)
}
}
*a = b
}
return nil
}

// Value implements the driver.Valuer interface.
func (a StringArray) Value() (driver.Value, error) {
if a == nil {
return nil, nil
}

if n := len(a); n > 0 {
// There will be at least two curly brackets, 2*N bytes of quotes,
// and N-1 bytes of delimiters.
b := make([]byte, 1, 1+3*n)
b[0] = '{'

b = appendArrayQuotedBytes(b, []byte(a[0]))
for i := 1; i < n; i++ {
b = append(b, ',')
b = appendArrayQuotedBytes(b, []byte(a[i]))
}

return string(append(b, '}')), nil
}

return "{}", nil
}

// appendArray appends rv to the buffer, returning the extended buffer and
// the delimiter used between elements.
//
// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice.
func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) {
var del string
var err error

b = append(b, '{')

if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil {
return b, del, err
}

for i := 1; i < n; i++ {
b = append(b, del...)
if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil {
return b, del, err
}
}

return append(b, '}'), del, nil
}

// appendArrayElement appends rv to the buffer, returning the extended buffer
// and the delimiter to use before the next element.
//
// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted
// using driver.DefaultParameterConverter and the resulting []byte or string
// is double-quoted.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) {
if k := rv.Kind(); k == reflect.Array || k == reflect.Slice {
if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) {
if n := rv.Len(); n > 0 {
return appendArray(b, rv, n)
}

return b, "", nil
}
}

var del = ","
var err error
var iv interface{} = rv.Interface()

if ad, ok := iv.(ArrayDelimiter); ok {
del = ad.ArrayDelimiter()
}

if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil {
return b, del, err
}

switch v := iv.(type) {
case nil:
return append(b, "NULL"...), del, nil
case []byte:
return appendArrayQuotedBytes(b, v), del, nil
case string:
return appendArrayQuotedBytes(b, []byte(v)), del, nil
}

b, err = appendValue(b, iv)
return b, del, err
}

func appendArrayQuotedBytes(b, v []byte) []byte {
b = append(b, '"')
for {
i := bytes.IndexAny(v, `"\`)
if i < 0 {
b = append(b, v...)
break
}
if i > 0 {
b = append(b, v[:i]...)
}
b = append(b, '\\', v[i])
v = v[i+1:]
}
return append(b, '"')
}

func appendValue(b []byte, v driver.Value) ([]byte, error) {
return append(b, encode(nil, v, 0)...), nil
}

// parseArray extracts the dimensions and elements of an array represented in
// text format. Only representations emitted by the backend are supported.
// Notably, whitespace around brackets and delimiters is significant, and NULL
// is case-sensitive.
//
// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) {
var depth, i int

if len(src) < 1 || src[0] != '{' {
return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0)
}

Open:
for i < len(src) {
switch src[i] {
case '{':
depth++
i++
case '}':
elems = make([][]byte, 0)
goto Close
default:
break Open
}
}
dims = make([]int, i)

Element:
for i < len(src) {
switch src[i] {
case '{':
if depth == len(dims) {
break Element
}
depth++
dims[depth-1] = 0
i++
case '"':
var elem = []byte{}
var escape bool
for i++; i < len(src); i++ {
if escape {
elem = append(elem, src[i])
escape = false
} else {
switch src[i] {
default:
elem = append(elem, src[i])
case '\\':
escape = true
case '"':
elems = append(elems, elem)
i++
break Element
}
}
}
default:
for start := i; i < len(src); i++ {
if bytes.HasPrefix(src[i:], del) || src[i] == '}' {
elem := src[start:i]
if len(elem) == 0 {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
if bytes.Equal(elem, []byte("NULL")) {
elem = nil
}
elems = append(elems, elem)
break Element
}
}
}
}

for i < len(src) {
if bytes.HasPrefix(src[i:], del) && depth > 0 {
dims[depth-1]++
i += len(del)
goto Element
} else if src[i] == '}' && depth > 0 {
dims[depth-1]++
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}

Close:
for i < len(src) {
if src[i] == '}' && depth > 0 {
depth--
i++
} else {
return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i)
}
}
if depth > 0 {
err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i)
}
if err == nil {
for _, d := range dims {
if (len(elems) % d) != 0 {
err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions")
}
}
}
return
}

func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) {
dims, elems, err := parseArray(src, del)
if err != nil {
return nil, err
}
if len(dims) > 1 {
return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ)
}
return elems, err
}
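
Note: the `Array` helper above is easiest to see in a round trip. A small hedged sketch, not part of the vendored code, assuming a reachable database and a table `t` with an integer `id` column:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pass a Go slice as a PostgreSQL array parameter.
	rows, err := db.Query(`SELECT id FROM t WHERE id = ANY($1)`, pq.Array([]int64{235, 401}))
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// Scan a PostgreSQL array back into a Go slice.
	var ids []int64
	if err := db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&ids)); err != nil {
		log.Fatal(err)
	}
	log.Println(ids) // [235 401]
}
```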

vendor/github.com/lib/pq/buf.go (+91, -0)

@@ -0,0 +1,91 @@
package pq

import (
"bytes"
"encoding/binary"

"github.com/lib/pq/oid"
)

type readBuf []byte

func (b *readBuf) int32() (n int) {
n = int(int32(binary.BigEndian.Uint32(*b)))
*b = (*b)[4:]
return
}

func (b *readBuf) oid() (n oid.Oid) {
n = oid.Oid(binary.BigEndian.Uint32(*b))
*b = (*b)[4:]
return
}

// N.B: this is actually an unsigned 16-bit integer, unlike int32
func (b *readBuf) int16() (n int) {
n = int(binary.BigEndian.Uint16(*b))
*b = (*b)[2:]
return
}

func (b *readBuf) string() string {
i := bytes.IndexByte(*b, 0)
if i < 0 {
errorf("invalid message format; expected string terminator")
}
s := (*b)[:i]
*b = (*b)[i+1:]
return string(s)
}

func (b *readBuf) next(n int) (v []byte) {
v = (*b)[:n]
*b = (*b)[n:]
return
}

func (b *readBuf) byte() byte {
return b.next(1)[0]
}

type writeBuf struct {
buf []byte
pos int
}

func (b *writeBuf) int32(n int) {
x := make([]byte, 4)
binary.BigEndian.PutUint32(x, uint32(n))
b.buf = append(b.buf, x...)
}

func (b *writeBuf) int16(n int) {
x := make([]byte, 2)
binary.BigEndian.PutUint16(x, uint16(n))
b.buf = append(b.buf, x...)
}

func (b *writeBuf) string(s string) {
b.buf = append(append(b.buf, s...), '\000')
}

func (b *writeBuf) byte(c byte) {
b.buf = append(b.buf, c)
}

func (b *writeBuf) bytes(v []byte) {
b.buf = append(b.buf, v...)
}

func (b *writeBuf) wrap() []byte {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
return b.buf
}

func (b *writeBuf) next(c byte) {
p := b.buf[b.pos:]
binary.BigEndian.PutUint32(p, uint32(len(p)))
b.pos = len(b.buf) + 1
b.buf = append(b.buf, c, 0, 0, 0, 0)
}
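
Note: `writeBuf` builds frontend messages as a one-byte type tag followed by a big-endian int32 length that counts itself but not the tag. A standalone hedged sketch of that framing, without the unexported types above:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame builds a message in the wire format writeBuf produces: a one-byte
// type tag, a big-endian int32 length that includes the length field itself
// but not the tag, then the payload.
func frame(typ byte, payload []byte) []byte {
	msg := make([]byte, 0, 1+4+len(payload))
	msg = append(msg, typ)
	msg = append(msg, 0, 0, 0, 0)
	binary.BigEndian.PutUint32(msg[1:5], uint32(4+len(payload)))
	return append(msg, payload...)
}

func main() {
	// 'Q' is a simple-query message; its payload is a NUL-terminated string.
	m := frame('Q', append([]byte("SELECT 1"), 0))
	fmt.Printf("% x\n", m)
}
```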

vendor/github.com/lib/pq/conn.go (+1996, -0; file diff suppressed because it is too large)


vendor/github.com/lib/pq/conn_go18.go (+149, -0)

@@ -0,0 +1,149 @@
package pq

import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"io"
"io/ioutil"
"time"
)

// Implement the "QueryerContext" interface
func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}
finish := cn.watchCancel(ctx)
r, err := cn.query(query, list)
if err != nil {
if finish != nil {
finish()
}
return nil, err
}
r.finish = finish
return r, nil
}

// Implement the "ExecerContext" interface
func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
list := make([]driver.Value, len(args))
for i, nv := range args {
list[i] = nv.Value
}

if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}

return cn.Exec(query, list)
}

// Implement the "ConnBeginTx" interface
func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
var mode string

switch sql.IsolationLevel(opts.Isolation) {
case sql.LevelDefault:
// Don't touch mode: use the server's default
case sql.LevelReadUncommitted:
mode = " ISOLATION LEVEL READ UNCOMMITTED"
case sql.LevelReadCommitted:
mode = " ISOLATION LEVEL READ COMMITTED"
case sql.LevelRepeatableRead:
mode = " ISOLATION LEVEL REPEATABLE READ"
case sql.LevelSerializable:
mode = " ISOLATION LEVEL SERIALIZABLE"
default:
return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation)
}

if opts.ReadOnly {
mode += " READ ONLY"
} else {
mode += " READ WRITE"
}

tx, err := cn.begin(mode)
if err != nil {
return nil, err
}
cn.txnFinish = cn.watchCancel(ctx)
return tx, nil
}

func (cn *conn) Ping(ctx context.Context) error {
if finish := cn.watchCancel(ctx); finish != nil {
defer finish()
}
rows, err := cn.simpleQuery(";")
if err != nil {
return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger
}
rows.Close()
return nil
}

func (cn *conn) watchCancel(ctx context.Context) func() {
if done := ctx.Done(); done != nil {
finished := make(chan struct{})
go func() {
select {
case <-done:
// At this point the function level context is canceled,
// so it must not be used for the additional network
// request to cancel the query.
// Create a new context to pass into the dial.
ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()

_ = cn.cancel(ctxCancel)
finished <- struct{}{}
case <-finished:
}
}()
return func() {
select {
case <-finished:
case finished <- struct{}{}:
}
}
}
return nil
}

func (cn *conn) cancel(ctx context.Context) error {
c, err := dial(ctx, cn.dialer, cn.opts)
if err != nil {
return err
}
defer c.Close()

{
can := conn{
c: c,
}
err = can.ssl(cn.opts)
if err != nil {
return err
}

w := can.writeBuf(0)
w.int32(80877102) // cancel request code
w.int32(cn.processID)
w.int32(cn.secretKey)

if err := can.sendStartupPacket(w); err != nil {
return err
}
}

// Read until EOF to ensure that the server received the cancel.
{
_, err := io.Copy(ioutil.Discard, c)
return err
}
}
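
Note: what the context plumbing above buys callers is ordinary `database/sql` cancellation; when the context expires, `watchCancel` fires and a separate connection delivers a cancel request for the running query. A hedged usage sketch (DSN is a placeholder):

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// If the statement runs longer than two seconds, the server-side query
	// is cancelled and ExecContext returns an error.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	if _, err := db.ExecContext(ctx, "SELECT pg_sleep(10)"); err != nil {
		log.Println("query cancelled or failed:", err)
	}
}
```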

vendor/github.com/lib/pq/connector.go (+115, -0)

@@ -0,0 +1,115 @@
package pq

import (
"context"
"database/sql/driver"
"errors"
"fmt"
"os"
"strings"
)

// Connector represents a fixed configuration for the pq driver with a given
// name. Connector satisfies the database/sql/driver Connector interface and
// can be used to create any number of DB Conn's via the database/sql OpenDB
// function.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
type Connector struct {
opts values
dialer Dialer
}

// Connect returns a connection to the database using the fixed configuration
// of this Connector. Context is not used.
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
return c.open(ctx)
}

// Driver returns the underlying driver of this Connector.
func (c *Connector) Driver() driver.Driver {
return &Driver{}
}

// NewConnector returns a connector for the pq driver in a fixed configuration
// with the given dsn. The returned connector can be used to create any number
// of equivalent Conn's. The returned connector is intended to be used with
// database/sql.OpenDB.
//
// See https://golang.org/pkg/database/sql/driver/#Connector.
// See https://golang.org/pkg/database/sql/#OpenDB.
func NewConnector(dsn string) (*Connector, error) {
var err error
o := make(values)

// A number of defaults are applied here, in this order:
//
// * Very low precedence defaults applied in every situation
// * Environment variables
// * Explicitly passed connection information
o["host"] = "localhost"
o["port"] = "5432"
// N.B.: Extra float digits should be set to 3, but that breaks
// Postgres 8.4 and older, where the max is 2.
o["extra_float_digits"] = "2"
for k, v := range parseEnviron(os.Environ()) {
o[k] = v
}

if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
dsn, err = ParseURL(dsn)
if err != nil {
return nil, err
}
}

if err := parseOpts(dsn, o); err != nil {
return nil, err
}

// Use the "fallback" application name if necessary
if fallback, ok := o["fallback_application_name"]; ok {
if _, ok := o["application_name"]; !ok {
o["application_name"] = fallback
}
}

// We can't work with any client_encoding other than UTF-8 currently.
// However, we have historically allowed the user to set it to UTF-8
// explicitly, and there's no reason to break such programs, so allow that.
// Note that the "options" setting could also set client_encoding, but
// parsing its value is not worth it. Instead, we always explicitly send
// client_encoding as a separate run-time parameter, which should override
// anything set in options.
if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
return nil, errors.New("client_encoding must be absent or 'UTF8'")
}
o["client_encoding"] = "UTF8"
// DateStyle needs a similar treatment.
if datestyle, ok := o["datestyle"]; ok {
if datestyle != "ISO, MDY" {
return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle)
}
} else {
o["datestyle"] = "ISO, MDY"
}

// If a user is not provided by any other means, the last
// resort is to use the current operating system provided user
// name.
if _, ok := o["user"]; !ok {
u, err := userCurrent()
if err != nil {
return nil, err
}
o["user"] = u
}

// SSL is not necessary or supported over UNIX domain sockets
if network, _ := network(o); network == "unix" {
o["sslmode"] = "disable"
}

return &Connector{opts: o, dialer: defaultDialer{}}, nil
}
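
Note: `NewConnector` is the fixed-configuration alternative to handing a DSN to `sql.Open`. A short hedged sketch of the intended pairing with `database/sql.OpenDB` (DSN is a placeholder):

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	// Parse the DSN once; the connector then hands out equivalent connections.
	connector, err := pq.NewConnector("postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	db := sql.OpenDB(connector)
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected via Connector")
}
```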

vendor/github.com/lib/pq/copy.go (+284, -0)

@@ -0,0 +1,284 @@
package pq

import (
"database/sql/driver"
"encoding/binary"
"errors"
"fmt"
"sync"
)

var (
errCopyInClosed = errors.New("pq: copyin statement has already been closed")
errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY")
errCopyToNotSupported = errors.New("pq: COPY TO is not supported")
errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction")
errCopyInProgress = errors.New("pq: COPY in progress")
)

// CopyIn creates a COPY FROM statement which can be prepared with
// Tx.Prepare(). The target table should be visible in search_path.
func CopyIn(table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(table) + " ("
for i, col := range columns {
if i != 0 {
stmt += ", "
}
stmt += QuoteIdentifier(col)
}
stmt += ") FROM STDIN"
return stmt
}

// CopyInSchema creates a COPY FROM statement which can be prepared with
// Tx.Prepare().
func CopyInSchema(schema, table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
for i, col := range columns {
if i != 0 {
stmt += ", "
}
stmt += QuoteIdentifier(col)
}
stmt += ") FROM STDIN"
return stmt
}

type copyin struct {
cn *conn
buffer []byte
rowData chan []byte
done chan bool

closed bool

sync.Mutex // guards err
err error
}

const ciBufferSize = 64 * 1024

// flush buffer before the buffer is filled up and needs reallocation
const ciBufferFlushSize = 63 * 1024

func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) {
if !cn.isInTransaction() {
return nil, errCopyNotSupportedOutsideTxn
}

ci := &copyin{
cn: cn,
buffer: make([]byte, 0, ciBufferSize),
rowData: make(chan []byte),
done: make(chan bool, 1),
}
// add CopyData identifier + 4 bytes for message length
ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0)

b := cn.writeBuf('Q')
b.string(q)
cn.send(b)

awaitCopyInResponse:
for {
t, r := cn.recv1()
switch t {
case 'G':
if r.byte() != 0 {
err = errBinaryCopyNotSupported
break awaitCopyInResponse
}
go ci.resploop()
return ci, nil
case 'H':
err = errCopyToNotSupported
break awaitCopyInResponse
case 'E':
err = parseError(r)
case 'Z':
if err == nil {
ci.setBad()
errorf("unexpected ReadyForQuery in response to COPY")
}
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad()
errorf("unknown response for copy query: %q", t)
}
}

// something went wrong, abort COPY before we return
b = cn.writeBuf('f')
b.string(err.Error())
cn.send(b)

for {
t, r := cn.recv1()
switch t {
case 'c', 'C', 'E':
case 'Z':
// correctly aborted, we're done
cn.processReadyForQuery(r)
return nil, err
default:
ci.setBad()
errorf("unknown response for CopyFail: %q", t)
}
}
}

func (ci *copyin) flush(buf []byte) {
// set message length (without message identifier)
binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1))

_, err := ci.cn.c.Write(buf)
if err != nil {
panic(err)
}
}

func (ci *copyin) resploop() {
for {
var r readBuf
t, err := ci.cn.recvMessage(&r)
if err != nil {
ci.setBad()
ci.setError(err)
ci.done <- true
return
}
switch t {
case 'C':
// complete
case 'N':
if n := ci.cn.noticeHandler; n != nil {
n(parseError(&r))
}
case 'Z':
ci.cn.processReadyForQuery(&r)
ci.done <- true
return
case 'E':
err := parseError(&r)
ci.setError(err)
default:
ci.setBad()
ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t))
ci.done <- true
return
}
}
}

func (ci *copyin) setBad() {
ci.Lock()
ci.cn.bad = true
ci.Unlock()
}

func (ci *copyin) isBad() bool {
ci.Lock()
b := ci.cn.bad
ci.Unlock()
return b
}

func (ci *copyin) isErrorSet() bool {
ci.Lock()
isSet := (ci.err != nil)
ci.Unlock()
return isSet
}

// setError() sets ci.err if one has not been set already. Caller must not be
// holding ci.Mutex.
func (ci *copyin) setError(err error) {
ci.Lock()
if ci.err == nil {
ci.err = err
}
ci.Unlock()
}

func (ci *copyin) NumInput() int {
return -1
}

func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) {
return nil, ErrNotSupported
}

// Exec inserts values into the COPY stream. The insert is asynchronous
// and Exec can return errors from previous Exec calls to the same
// COPY stmt.
//
// You need to call Exec(nil) to sync the COPY stream and to get any
// errors from pending data, since Stmt.Close() doesn't return errors
// to the user.
func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) {
if ci.closed {
return nil, errCopyInClosed
}

if ci.isBad() {
return nil, driver.ErrBadConn
}
defer ci.cn.errRecover(&err)

if ci.isErrorSet() {
return nil, ci.err
}

if len(v) == 0 {
return driver.RowsAffected(0), ci.Close()
}

numValues := len(v)
for i, value := range v {
ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value)
if i < numValues-1 {
ci.buffer = append(ci.buffer, '\t')
}
}

ci.buffer = append(ci.buffer, '\n')

if len(ci.buffer) > ciBufferFlushSize {
ci.flush(ci.buffer)
// reset buffer, keep bytes for message identifier and length
ci.buffer = ci.buffer[:5]
}

return driver.RowsAffected(0), nil
}

func (ci *copyin) Close() (err error) {
if ci.closed { // Don't do anything, we're already closed
return nil
}
ci.closed = true

if ci.isBad() {
return driver.ErrBadConn
}
defer ci.cn.errRecover(&err)

if len(ci.buffer) > 0 {
ci.flush(ci.buffer)
}
// Avoid touching the scratch buffer as resploop could be using it.
err = ci.cn.sendSimpleMessage('c')
if err != nil {
return err
}

<-ci.done
ci.cn.inCopy = false

if ci.isErrorSet() {
err = ci.err
return err
}
return nil
}

vendor/github.com/lib/pq/doc.go (+263, -0)

@@ -0,0 +1,263 @@
/*
Package pq is a pure Go Postgres driver for the database/sql package.

In most cases clients will use the database/sql package instead of
using this package directly. For example:

import (
"database/sql"

_ "github.com/lib/pq"
)

func main() {
connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full"
db, err := sql.Open("postgres", connStr)
if err != nil {
log.Fatal(err)
}

age := 21
rows, err := db.Query("SELECT name FROM users WHERE age = $1", age)
}

You can also connect to a database using a URL. For example:

connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full"
db, err := sql.Open("postgres", connStr)


Connection String Parameters


Similarly to libpq, when establishing a connection using pq you are expected to
supply a connection string containing zero or more parameters.
A subset of the connection parameters supported by libpq are also supported by pq.
Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem)
directly in the connection string. This is different from libpq, which does not allow
run-time parameters in the connection string, instead requiring you to supply
them in the options parameter.

For compatibility with libpq, the following special connection parameters are
supported:

* dbname - The name of the database to connect to
* user - The user to sign in as
* password - The user's password
* host - The host to connect to. Values that start with / are for unix
domain sockets. (default is localhost)
* port - The port to bind to. (default is 5432)
* sslmode - Whether or not to use SSL (default is require, this is not
the default for libpq)
* fallback_application_name - An application_name to fall back to if one isn't provided.
* connect_timeout - Maximum wait for connection, in seconds. Zero or
not specified means wait indefinitely.
* sslcert - Cert file location. The file must contain PEM encoded data.
* sslkey - Key file location. The file must contain PEM encoded data.
* sslrootcert - The location of the root certificate file. The file
must contain PEM encoded data.
* spn - Configures GSS (Kerberos) SPN.
* service - GSS (Kerberos) service name to use when constructing the SPN (default is `postgres`).

Valid values for sslmode are:

* disable - No SSL
* require - Always SSL (skip verification)
* verify-ca - Always SSL (verify that the certificate presented by the
server was signed by a trusted CA)
* verify-full - Always SSL (verify that the certification presented by
the server was signed by a trusted CA and the server host name
matches the one in the certificate)

See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING
for more information about connection string parameters.

Use single quotes for values that contain whitespace:

"user=pqgotest password='with spaces'"

A backslash will escape the next character in values:

"user=space\ man password='it\'s valid'"

Note that the connection parameter client_encoding (which sets the
text encoding for the connection) may be set but must be "UTF8",
matching with the same rules as Postgres. It is an error to provide
any other value.

In addition to the parameters listed above, any run-time parameter that can be
set at backend start time can be set in the connection string. For more
information, see
http://www.postgresql.org/docs/current/static/runtime-config.html.

Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html
supported by libpq are also supported by pq. If any of the environment
variables not supported by pq are set, pq will panic during connection
establishment. Environment variables have a lower precedence than explicitly
provided connection parameters.

The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html
is supported, but on Windows PGPASSFILE must be specified explicitly.


Queries


database/sql does not dictate any specific format for parameter
markers in query strings, and pq uses the Postgres-native ordinal markers,
as shown above. The same marker can be reused for the same parameter:

rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1
OR age BETWEEN $2 AND $2 + 3`, "orange", 64)

pq does not support the LastInsertId() method of the Result type in database/sql.
To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres
RETURNING clause with a standard Query or QueryRow call:

var userid int
err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age)
VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid)

For more details on RETURNING, see the Postgres documentation:

http://www.postgresql.org/docs/current/static/sql-insert.html
http://www.postgresql.org/docs/current/static/sql-update.html
http://www.postgresql.org/docs/current/static/sql-delete.html

For additional instructions on querying see the documentation for the database/sql package.


Data Types


Parameters pass through driver.DefaultParameterConverter before they are handled
by this package. When the binary_parameters connection option is enabled,
[]byte values are sent directly to the backend as data in binary format.

This package returns the following types for values from the PostgreSQL backend:

- integer types smallint, integer, and bigint are returned as int64
- floating-point types real and double precision are returned as float64
- character types char, varchar, and text are returned as string
- temporal types date, time, timetz, timestamp, and timestamptz are
returned as time.Time
- the boolean type is returned as bool
- the bytea type is returned as []byte

All other types are returned directly from the backend as []byte values in text format.


Errors


pq may return errors of type *pq.Error which can be interrogated for error details:

if err, ok := err.(*pq.Error); ok {
fmt.Println("pq error:", err.Code.Name())
}

See the pq.Error type for details.


Bulk imports

You can perform bulk imports by preparing a statement returned by pq.CopyIn (or
pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement
handle can then be repeatedly "executed" to copy data into the target table.
After all data has been processed you should call Exec() once with no arguments
to flush all buffered data. Any call to Exec() might return an error which
should be handled appropriately, but because of the internal buffering an error
returned by Exec() might not be related to the data passed in the call that
failed.

CopyIn uses COPY FROM internally. It is not possible to COPY outside of an
explicit transaction in pq.

Usage example:

txn, err := db.Begin()
if err != nil {
log.Fatal(err)
}

stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
if err != nil {
log.Fatal(err)
}

for _, user := range users {
_, err = stmt.Exec(user.Name, int64(user.Age))
if err != nil {
log.Fatal(err)
}
}

_, err = stmt.Exec()
if err != nil {
log.Fatal(err)
}

err = stmt.Close()
if err != nil {
log.Fatal(err)
}

err = txn.Commit()
if err != nil {
log.Fatal(err)
}


Notifications


PostgreSQL supports a simple publish/subscribe model over database
connections. See http://www.postgresql.org/docs/current/static/sql-notify.html
for more information about the general mechanism.

To start listening for notifications, you first have to open a new connection
to the database by calling NewListener. This connection can not be used for
anything other than LISTEN / NOTIFY. Calling Listen will open a "notification
channel"; once a notification channel is open, a notification generated on that
channel will effect a send on the Listener.Notify channel. A notification
channel will remain open until Unlisten is called, though connection loss might
result in some notifications being lost. To solve this problem, Listener sends
a nil pointer over the Notify channel any time the connection is re-established
following a connection loss. The application can get information about the
state of the underlying connection by setting an event callback in the call to
NewListener.

A single Listener can safely be used from concurrent goroutines, which means
that there is often no need to create more than one Listener in your
application. However, a Listener is always connected to a single database, so
you will need to create a new Listener instance for every database you want to
receive notifications in.

The channel name in both Listen and Unlisten is case sensitive, and can contain
any characters legal in an identifier (see
http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
for more information). Note that the channel name will be truncated to 63
bytes by the PostgreSQL server.

You can find a complete, working example of Listener usage at
https://godoc.org/github.com/lib/pq/example/listen.


Kerberos Support


If you need support for Kerberos authentication, add the following to your main
package:

import "github.com/lib/pq/auth/kerberos"

func init() {
pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() })
}

This package is in a separate module so that users who don't need Kerberos
don't have to download unnecessary dependencies.

*/
package pq
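
Note: the Notifications section above points to an external example rather than showing code. A minimal hedged sketch of the Listener API it describes (the channel name `events`, the DSN, and the reconnect intervals are made up):

```go
package main

import (
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	connStr := "postgres://user:pass@localhost/db?sslmode=disable"

	// The callback receives connection state events; here it just logs errors.
	listener := pq.NewListener(connStr, 10*time.Second, time.Minute,
		func(ev pq.ListenerEventType, err error) {
			if err != nil {
				log.Println("listener event error:", err)
			}
		})
	defer listener.Close()

	if err := listener.Listen("events"); err != nil {
		log.Fatal(err)
	}

	for n := range listener.Notify {
		if n == nil {
			// A nil notification is sent after the connection is re-established.
			log.Println("reconnected; notifications may have been missed")
			continue
		}
		log.Println("notification on", n.Channel, "payload:", n.Extra)
	}
}
```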

vendor/github.com/lib/pq/encode.go (+622, -0)

@@ -0,0 +1,622 @@
package pq

import (
"bytes"
"database/sql/driver"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math"
"regexp"
"strconv"
"strings"
"sync"
"time"

"github.com/lib/pq/oid"
)

var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`)

func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte {
switch v := x.(type) {
case []byte:
return v
default:
return encode(parameterStatus, x, oid.T_unknown)
}
}

func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte {
switch v := x.(type) {
case int64:
return strconv.AppendInt(nil, v, 10)
case float64:
return strconv.AppendFloat(nil, v, 'f', -1, 64)
case []byte:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, v)
}

return v
case string:
if pgtypOid == oid.T_bytea {
return encodeBytea(parameterStatus.serverVersion, []byte(v))
}

return []byte(v)
case bool:
return strconv.AppendBool(nil, v)
case time.Time:
return formatTs(v)

default:
errorf("encode: unknown type for %T", v)
}

panic("not reached")
}

func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} {
switch f {
case formatBinary:
return binaryDecode(parameterStatus, s, typ)
case formatText:
return textDecode(parameterStatus, s, typ)
default:
panic("not reached")
}
}

func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} {
switch typ {
case oid.T_bytea:
return s
case oid.T_int8:
return int64(binary.BigEndian.Uint64(s))
case oid.T_int4:
return int64(int32(binary.BigEndian.Uint32(s)))