(Engine): Database system improvements (#358)

* Migrated to goose & sqlboiler

* create tests with sqlboiler

* code clean up

* Added gct -> sqlboiler config gen

* dropped pgx support

* dropped pgx support because who needs connection pools

* reenable sqlite audit tests

* first pass of migration changes

* stuff is broken :D

* sqlboiler :D

* end of date commit

* Added comments code clean up

* revert go module files back to upstream

* bug fix

* pushed go.mod update to use correct goose version

* renamed sqlite to sqlite3 for consistency across codebase and PR feedback changes

* makefile updates

* things are broken end of day commit

* added postgresql test

* use correct database name

* travis fixes for env vars

* travis fixes for env vars

* test fixes

* run migration on test setup

* test adding postgres support to appveyor

* Skip tests on appveyor due to issues with missing binaries

* oh yeah i have to support windows don't i

* bumped goose version up

* add postgres to osx

* fix travis config as osx does not support services move spin up to before_script

* added PGDATA path fix

* pass PG_DATA to pg_ctl

* added initdb to before install

* fixes to wording and bumps up goose version

* who needs ssl anyway

* moved ssl to correct section :D

* bumped goose version up

* unbreak travis

* unbreak travis

* fix if database is disabled in config

* move strings to consts

* converted more strings to const

* improvements to sqlboiler model gen

* Added contrib\sqlboiler file

* sqlboiler windows contrib fixes

* bumped goose version up

* :D whoops

* further fixes to sql models

* further fixes to sql models

* database type fix for config gen

* README update

* go.mod clean up

* added config details for appveyor

* appveyor ordering fix

* force psql9.6

* appveyor config changes

* all the environment vars

* model changes for psql

* model changes for psql

* sqlite model fixes

* attempt at osx fix

* added error check for migration

* typos and check against goose error instead of string :D

* updated sqlboiler commit id

* bump sqlboiler version again

* set decimal package to @0bb1631

* readme and makefile updates

* bump goose version update readme and add override flag to config gen

* README typo fix and lowered inserts in test down to 20 as we are only testing that inserts work running 200 was unnecessary

* added gctcli command for audit event

* Added debug output toggle to config added both postgres & sqlite support to gctcli command

* Wording changes on errors

* set sqlite to 1 connection to stop locked database issues

* Usage update for order

* README updates with config examples

* go.mod/sum tidy

* removed lines in imports section

* removed lines in imports

* convert local time to utc for database and display output

* go mod clean up and error checking to time

* renamed all packages to sqlite3

* added windows command output for sql model gen

* time conversion fix

* time conversion on gctcli
This commit is contained in:
Andrew
2019-10-08 15:28:31 +11:00
committed by Adrian Gallagher
parent 2a13551dd1
commit 92147cdc5f
66 changed files with 6018 additions and 2745 deletions

View File

@@ -13,15 +13,35 @@ environment:
GO111MODULE: on
NODEJS_VER: 10.15.3
APPVEYOR_SAVE_CACHE_ON_ERROR: true
POSTGRES_PATH: C:\Program Files\PostgreSQL\9.6
PGUSER: postgres
PGPASSWORD: Password12!
POSTGRES_ENV_POSTGRES_USER: postgres
POSTGRES_ENV_POSTGRES_PASSWORD: Password12!
POSTGRES_ENV_POSTGRES_DB: gct_dev_ci
PSQL_USER: postgres
PSQL_HOST: localhost
PSQL_PASS: Password12!
PSQL_DBNAME: gct_dev_ci
PSQL_SSLMODE: disable
stack: go 1.12.3
services:
- postgresql96
init:
- SET PATH=%POSTGRES_PATH%\bin;%PATH%
install:
- set Path=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%Path%
- ps: Install-Product node $env:NODEJS_VER
- cd c:\gopath\src\github.com\thrasher-corp\gocryptotrader\web
- npm install
build_script:
- createdb gct_dev_ci
before_test:
- cd c:\gopath\src\github.com\thrasher-corp\gocryptotrader
- go get

2
.gitignore vendored
View File

@@ -25,3 +25,5 @@ gocryptotrader
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
sqlboiler.toml
sqlboiler.json

View File

@@ -13,7 +13,6 @@ matrix:
script:
- npm run lint
- npm run build
- language: go
dist: xenial
name: 'GoCryptoTrader [back-end] [linux]'
@@ -21,11 +20,17 @@ matrix:
- 1.13.x
env:
- GO111MODULE=on
- PSQL_USER=postgres
- PSQL_HOST=localhost
- PSQL_DBNAME=gct_dev_ci
install: true
cache:
directories:
- $GOPATH/pkg/mod
services:
- postgresql
before_script:
- psql -c 'create database gct_dev_ci;' -U postgres
script:
- make check
after_success:
@@ -38,11 +43,22 @@ matrix:
- 1.13.x
env:
- GO111MODULE=on
- PSQL_USER=postgres
- PSQL_HOST=localhost
- PSQL_DBNAME=gct_dev_ci
- PSQL_SSLMODE=disable
- PSQL_SKIPSQLCMD=true
- PSQL_TESTDBNAME=gct_dev_ci
install: true
cache:
directories:
- $GOPATH/pkg/mod
before_install:
- rm -rf /usr/local/var/postgres
- initdb /usr/local/var/postgres
- pg_ctl start --pgdata /usr/local/var/postgres
- createuser -s postgres
- psql -c 'create database gct_dev_ci;' -U postgres
script:
- make check
after_success:

View File

@@ -5,6 +5,7 @@ LINTBIN = $(GOPATH)/bin/golangci-lint
GCTLISTENPORT=9050
GCTPROFILERLISTENPORT=8085
CRON = $(TRAVIS_EVENT_TYPE)
DRIVER ?= psql
get:
GO111MODULE=on go get $(GCTPKG)
@@ -46,5 +47,9 @@ profile_heap:
profile_cpu:
go tool pprof -http "localhost:$(GCTPROFILERLISTENPORT)" 'http://localhost:$(GCTLISTENPORT)/debug/pprof/profile'
db_migrate:
go run ./cmd/dbmigrate
gen_db_models:
ifeq ($(DRIVER), psql)
sqlboiler -o database/models/postgres -p postgres --no-auto-timestamps --wipe $(DRIVER)
else
sqlboiler -o database/models/sqlite3 -p sqlite3 --no-auto-timestamps --wipe $(DRIVER)
endif

View File

@@ -1,74 +1,46 @@
package main
import (
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/config"
"github.com/thrasher-corp/gocryptotrader/core"
"github.com/thrasher-corp/gocryptotrader/database"
db "github.com/thrasher-corp/gocryptotrader/database/drivers/postgres"
dbsqlite3 "github.com/thrasher-corp/gocryptotrader/database/drivers/sqlite"
mg "github.com/thrasher-corp/gocryptotrader/database/migration"
dbPSQL "github.com/thrasher-corp/gocryptotrader/database/drivers/postgres"
dbsqlite3 "github.com/thrasher-corp/gocryptotrader/database/drivers/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/repository"
"github.com/thrasher-corp/goose"
)
var (
dbConn *database.Database
configFile string
defaultDataDir string
createMigration string
migrationDir string
dbConn *database.Db
configFile string
defaultDataDir string
migrationDir string
command string
args string
)
var defaultMigration = []byte(`-- up
-- down
`)
func openDbConnection(driver string) (err error) {
if driver == "postgres" {
dbConn, err = db.Connect()
if driver == database.DBPostgreSQL {
dbConn, err = dbPSQL.Connect()
if err != nil {
return fmt.Errorf("database failed to connect: %v Some features that utilise a database will be unavailable", err)
}
dbConn.SQL.SetMaxOpenConns(2)
dbConn.SQL.SetMaxIdleConns(1)
dbConn.SQL.SetConnMaxLifetime(time.Hour)
} else if driver == "sqlite" {
return nil
} else if driver == database.DBSQLite || driver == database.DBSQLite3 {
dbConn, err = dbsqlite3.Connect()
if err != nil {
return fmt.Errorf("database failed to connect: %v Some features that utilise a database will be unavailable", err)
}
return nil
}
return nil
}
type tmpLogger struct{}
// Printf implantation of migration Logger interface
// Passes directly to Printf from fmt package
func (t tmpLogger) Printf(format string, v ...interface{}) {
fmt.Printf(format, v...)
}
// Println implantation of migration Logger interface
// Passes directly to Println from fmt package
func (t tmpLogger) Println(v ...interface{}) {
fmt.Println(v...)
}
// Errorf implantation of migration Logger interface
// Passes directly to Printf from fmt package
func (t tmpLogger) Errorf(format string, v ...interface{}) {
fmt.Printf(format, v...)
return errors.New("no connection established")
}
func main() {
@@ -82,35 +54,15 @@ func main() {
os.Exit(1)
}
flag.StringVar(&command, "command", "", "command to run status|up|up-by-one|up-to|down|create")
flag.StringVar(&args, "args", "", "arguments to pass to goose")
flag.StringVar(&configFile, "config", defaultPath, "config file to load")
flag.StringVar(&defaultDataDir, "datadir", common.GetDefaultDataDir(runtime.GOOS), "default data directory for GoCryptoTrader files")
flag.StringVar(&createMigration, "create", "", "create a new empty migration file")
flag.StringVar(&migrationDir, "migrationdir", mg.MigrationDir, "override migration folder")
flag.StringVar(&migrationDir, "migrationdir", database.MigrationDir, "override migration folder")
flag.Parse()
if createMigration != "" {
err = newMigrationFile(createMigration)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println("Migration created successfully")
os.Exit(0)
}
tempLogger := tmpLogger{}
temp := mg.Migrator{
Log: tempLogger,
}
err = temp.LoadMigrations()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
conf := config.GetConfig()
err = conf.LoadConfig(configFile, true)
@@ -119,49 +71,32 @@ func main() {
os.Exit(1)
}
if !conf.Database.Enabled {
fmt.Println("Database support is disabled")
os.Exit(1)
}
err = openDbConnection(conf.Database.Driver)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Connected to: %s\n", conf.Database.Host)
drv := repository.GetSQLDialect()
temp.Conn = dbConn
if drv == database.DBSQLite || drv == database.DBSQLite3 {
fmt.Printf("Database file: %s\n", conf.Database.Database)
} else {
fmt.Printf("Connected to: %s\n", conf.Database.Host)
}
err = temp.RunMigration()
if err != nil {
if command == "" {
_ = goose.Run("status", dbConn.SQL, drv, migrationDir, "")
fmt.Println()
flag.Usage()
return
}
if err = goose.Run(command, dbConn.SQL, drv, migrationDir, args); err != nil {
fmt.Println(err)
os.Exit(1)
}
if dbConn.SQL != nil {
err = dbConn.SQL.Close()
if err != nil {
fmt.Println(err)
}
}
}
func newMigrationFile(filename string) error {
curTime := strconv.FormatInt(time.Now().Unix(), 10)
path := filepath.Join(migrationDir, curTime+"_"+filename+".sql")
err := common.CreateDir(migrationDir)
if err != nil {
return err
}
fmt.Printf("Creating new empty migration: %v\n", path)
f, err := os.Create(path)
if err != nil {
return err
}
_, err = f.Write(defaultMigration)
if err != nil {
return err
}
return f.Close()
}

View File

@@ -9,6 +9,7 @@ import (
"runtime"
"strconv"
"strings"
"time"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/currency"
@@ -2805,3 +2806,112 @@ func clearScreen() error {
return cmd.Run()
}
}
const timeFormat = "2006-01-02 15:04:05"
var startTime, endTime, order string
var limit int
var getAuditEventCommand = cli.Command{
Name: "getauditevent",
Usage: "gets audit events matching query parameters",
ArgsUsage: "<starttime> <endtime> <orderby> <limit>",
Action: getAuditEvent,
Flags: []cli.Flag{
cli.StringFlag{
Name: "start, s",
Usage: "start date to search",
Value: time.Now().Add(-time.Hour).Format(timeFormat),
Destination: &startTime,
},
cli.StringFlag{
Name: "end, e",
Usage: "end time to search",
Value: time.Now().Format(timeFormat),
Destination: &endTime,
},
cli.StringFlag{
Name: "order, o",
Usage: "order results by ascending/descending",
Value: "asc",
Destination: &order,
},
cli.IntFlag{
Name: "limit, l",
Usage: "how many results to retrieve",
Value: 100,
Destination: &limit,
},
},
}
func getAuditEvent(c *cli.Context) error {
if !c.IsSet("start") {
if c.Args().Get(0) != "" {
startTime = c.Args().Get(0)
}
}
if !c.IsSet("end") {
if c.Args().Get(1) != "" {
endTime = c.Args().Get(1)
}
}
if !c.IsSet("order") {
if c.Args().Get(2) != "" {
order = c.Args().Get(2)
}
}
if !c.IsSet("limit") {
if c.Args().Get(3) != "" {
limitStr, err := strconv.ParseInt(c.Args().Get(3), 10, 32)
if err == nil {
limit = int(limitStr)
}
}
}
s, err := time.Parse(timeFormat, startTime)
if err != nil {
return fmt.Errorf("invalid time format for start: %v", err)
}
e, err := time.Parse(timeFormat, endTime)
if err != nil {
return fmt.Errorf("invalid time format for end: %v", err)
}
if e.Before(s) {
return errors.New("start cannot be after before")
}
conn, err := setupClient()
if err != nil {
return err
}
defer conn.Close()
client := gctrpc.NewGoCryptoTraderClient(conn)
_, offset := time.Now().Zone()
loc := time.FixedZone("", -offset)
result, err := client.GetAuditEvent(context.Background(),
&gctrpc.GetAuditEventRequest{
StartDate: s.In(loc).Format(timeFormat),
EndDate: e.In(loc).Format(timeFormat),
Limit: int32(limit),
OrderBy: order,
Offset: int32(offset),
})
if err != nil {
return err
}
jsonOutput(result)
return nil
}

View File

@@ -132,6 +132,7 @@ func main() {
getExchangeOrderbookStreamCommand,
getTickerStreamCommand,
getExchangeTickerStreamCommand,
getAuditEventCommand,
}
err := app.Run(os.Args)

View File

@@ -0,0 +1,107 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/config"
"github.com/thrasher-corp/gocryptotrader/core"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/repository"
)
var (
configFile string
defaultDataDir string
outputFolder string
)
var sqlboilerConfig map[string]driverConfig
type driverConfig struct {
DBName string `json:"dbname,omitempty"`
Host string `json:"host,omitempty"`
Port uint16 `json:"port,omitempty"`
User string `json:"user,omitempty"`
Pass string `json:"pass,omitempty"`
Schema string `json:"schema,omitempty"`
SSLMode string `json:"sslmode,omitempty"`
Blacklist []string `json:"blacklist,omitempty"`
}
func main() {
fmt.Println("GoCryptoTrader SQLBoiler config generation tool")
fmt.Println(core.Copyright)
fmt.Println()
defaultPath, err := config.GetFilePath("")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
flag.StringVar(&configFile, "config", defaultPath, "config file to load")
flag.StringVar(&defaultDataDir, "datadir", common.GetDefaultDataDir(runtime.GOOS), "default data directory for GoCryptoTrader files")
flag.StringVar(&outputFolder, "outdir", "", "overwrite default output folder")
flag.Parse()
conf := config.GetConfig()
err = conf.LoadConfig(configFile, true)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
convertGCTtoSQLBoilerConfig(&conf.Database)
jsonOutput, err := json.MarshalIndent(sqlboilerConfig, "", " ")
if err != nil {
fmt.Printf("Marshal failed: %v", err)
os.Exit(1)
}
path := filepath.Join(outputFolder, "sqlboiler.json")
err = ioutil.WriteFile(path, jsonOutput, 0644)
if err != nil {
fmt.Printf("Write failed: %v", err)
os.Exit(1)
}
fmt.Println("sqlboiler.json file created")
}
func convertGCTtoSQLBoilerConfig(c *database.Config) {
tempConfig := driverConfig{
Blacklist: []string{"goose_db_version"},
}
sqlboilerConfig = make(map[string]driverConfig)
dbType := repository.GetSQLDialect()
if dbType == database.DBPostgreSQL {
dbType = "psql"
}
if dbType == database.DBSQLite || dbType == database.DBSQLite3 {
tempConfig.DBName = convertDBName(c.Database)
} else {
tempConfig.User = c.Username
tempConfig.Pass = c.Password
tempConfig.Port = c.Port
tempConfig.Host = c.Host
tempConfig.DBName = c.Database
tempConfig.SSLMode = c.SSLMode
}
sqlboilerConfig[dbType] = tempConfig
}
func convertDBName(in string) string {
return filepath.Join(common.GetDefaultDataDir(runtime.GOOS), "/database", in)
}

View File

@@ -8,7 +8,7 @@ tenets:
- import: codelingo/effective-go/unnecessary-else
- import: codelingo/code-review-comments/declare-empty-slice
- import: codelingo/effective-go/defer-close-file
- import: codelingo/effective-go/comment-first-word-when-empty
# - import: codelingo/effective-go/comment-first-word-when-empty # this has been disabled temporarily
- name: missing-stop-ticker
actions:
codelingo/review:

View File

@@ -1293,7 +1293,7 @@ func (c *Config) checkDatabaseConfig() error {
defer m.Unlock()
if (c.Database == database.Config{}) {
c.Database.Driver = "sqlite"
c.Database.Driver = database.DBSQLite3
c.Database.Database = database.DefaultSQLiteDatabase
}
@@ -1306,16 +1306,16 @@ func (c *Config) checkDatabaseConfig() error {
return fmt.Errorf("unsupported database driver %v, database disabled", c.Database.Driver)
}
if c.Database.Driver == "sqlite" {
if c.Database.Driver == database.DBSQLite || c.Database.Driver == database.DBSQLite3 {
databaseDir := filepath.Join(common.GetDefaultDataDir(runtime.GOOS), "/database")
err := common.CreateDir(databaseDir)
if err != nil {
return err
}
database.Conn.DataPath = databaseDir
database.DB.DataPath = databaseDir
}
database.Conn.Config = &c.Database
database.DB.Config = &c.Database
return nil
}

View File

@@ -1821,7 +1821,7 @@ func TestCheckDatabaseConfig(t *testing.T) {
t.Error(err)
}
if c.Database.Driver != "sqlite" ||
if c.Database.Driver != database.DBSQLite3 ||
c.Database.Database != database.DefaultSQLiteDatabase ||
c.Database.Enabled {
t.Error("unexpected results")
@@ -1833,7 +1833,7 @@ func TestCheckDatabaseConfig(t *testing.T) {
t.Error("unexpected result")
}
c.Database.Driver = "sqlite"
c.Database.Driver = database.DBSQLite3
c.Database.Enabled = true
if err := c.checkDatabaseConfig(); err != nil {
t.Error(err)

20
contrib/sqlboiler.cmd Normal file
View File

@@ -0,0 +1,20 @@
@echo off
title GoCryptoTrader Database Model Generation
IF NOT DEFINED GOPATH (
echo "GOPATH not set"
exit
)
IF NOT DEFINED DRIVER (
SET DRIVER=psql
)
IF %DRIVER%==psql (
IF NOT DEFINED MODEL (SET MODEL=postgres)
) ELSE (
IF NOT DEFINED MODEL (SET MODEL=sqlite3)
)
cd ..\
start %GOPATH%\\bin\\sqlboiler -o database\\models\\%MODEL% -p %MODEL% --no-auto-timestamps --wipe %DRIVER%
pause

View File

@@ -804,7 +804,7 @@ var (
STQ = NewCode("STQ")
INK = NewCode("INK")
HBZ = NewCode("HBZ")
USDT_ETH = NewCode("USDT_ETH") // nolint: golint
USDT_ETH = NewCode("USDT_ETH") // nolint: golint,stylecheck
QTUM_ETH = NewCode("QTUM_ETH") // nolint: golint
BTM_ETH = NewCode("BTM_ETH") // nolint: golint
FIL = NewCode("FIL")

View File

@@ -14,46 +14,135 @@ This database package is part of the GoCryptoTrader codebase.
## This is still in active development
You can track ideas, planned features and what's in progresss on this Trello board: [https://trello.com/b/ZAhMhpOy/gocryptotrader](https://trello.com/b/ZAhMhpOy/gocryptotrader).
You can track ideas, planned features and what's in progress on this Trello board: [https://trello.com/b/ZAhMhpOy/gocryptotrader](https://trello.com/b/ZAhMhpOy/gocryptotrader).
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for database package
+ Establishes & Maintains database connection across program life cycle
+ Multiple database support via simple repository model
+ Run migration on connection to assure database is at correct version
+ Migration handed by [Goose](https://github.com/thrasher-corp/goose)
+ Model generation handled by [SQLBoiler](https://github.com/thrasher-corp/sqlboiler)
## How to use
##### To Manually migrate to the latest database you can run the "dbmigrate" helper in the cmd folder
##### Prerequisites
This will parse and run all migration files in your $GoCryptoTrader/database/migrations
_This is also run from the bot when a connection is established to the database_
```sh
go run ./cmd/dbmigrate
```
A Makefile command has also been added for this
```sh
make db_migrate
[SQLBoiler](https://github.com/thrasher-corp/sqlboiler)
```shell script
go get -u github.com/thrasher-corp/sqlboiler
```
##### To create a new migrate file you can also run the same command with the -create "migration name" flag
[Postgres Driver](https://github.com/thrasher-corp/sqlboiler/drivers/sqlboiler-psql)
```shell script
go get -u github.com/thrasher-corp/sqlboiler/drivers/sqlboiler-psql
```
[SQLite Driver](https://github.com/thrasher-corp/sqlboiler-sqlite3)
```shell script
go get -u github.com/thrasher-corp/sqlboiler-sqlite3
```
##### Configuration
The database configuration struct is currently:
```shell script
type Config struct {
Enabled bool `json:"enabled"`
Verbose bool `json:"verbose"`
Driver string `json:"driver"`
drivers.ConnectionDetails `json:"connectionDetails"`
}
```
And Connection Details:
```sh
type ConnectionDetails struct {
Host string `json:"host"`
Port uint16 `json:"port"`
Username string `json:"username"`
Password string `json:"password"`
Database string `json:"database"`
SSLMode string `json:"sslmode"`
}
```
With an example configuration being:
```sh
go run ./cmd/dbmigrate -create "alter some table"
"database": {
"enabled": true,
"verbose": true,
"driver": "postgres",
"connectionDetails": {
"host": "localhost",
"port": 5432,
"username": "gct-dev",
"password": "gct-dev",
"database": "gct-dev",
"sslmode": "disable"
}
},
```
##### Create and Run migrations
Migrations are created using a modified version of [Goose](https://github.com/thrasher-corp/goose)
A helper tool sits in the ./cmd/dbmigrate folder that includes the following features:
+ Check current database version with the "status" command
```shell script
dbmigrate -command status
```
+ Create a new migration
```sh
dbmigrate -command "create" -args "model"
```
_This will create a folder in the ./database/migration folder that contains postgres.sql and sqlite.sql files_
+ Run dbmigrate command with -command up
```shell script
dbmigrate -command "up"
```
dbmigrate provides a -migrationdir flag override to tell it what path to look in for migrations
##### Adding a new model
Model's are generated using [SQLBoiler](https://github.com/thrasher-corp/sqlboiler)
A helper tool has been made located in gen_sqlboiler_config that will parse your GoCryptoTrader config and output a SQLBoiler config
+ Create Model in github.com/thrasher-corp/gocryptotrader/database/models directory
```sh
gen_sqlboiler_config
```
By default this will look in your gocryptotrader data folder and default config, these can be overwritten
along with the location of the sqlboiler generated config
```shell script
-config "configname.json"
-datadir "~/.gocryptotrader/"
-outdir "~/.gocryptotrader/"
```
Generate a new model that gets placed in ./database/models/<databasetype> folder
Linux:
```shell script
sqlboiler -o database/models/postgres -p postgres --no-auto-timestamps --wipe psql
```
Windows:
```sh
sqlboiler -o database\\models\\postgres -p postgres --no-auto-timestamps --wipe psql
```
Helpers have been provided in the Makefile for linux users
```
make gen_db_models
```
And in the contrib/sqlboiler.cmd for windows users
##### Adding a Repository
+ Create Repository directory in github.com/thrasher-corp/gocryptotrader/database/repository/
+ Create a base Repository interface with any required Methods
+ Create a per driver implementation of the Repository that implement all required methods to match the interface
## Contribution

View File

@@ -0,0 +1,12 @@
package database
import log "github.com/thrasher-corp/gocryptotrader/logger"
// Logger implements io.Writer interface to redirect SQLBoiler debug output to GCT logger
type Logger struct{}
// Write takes input and sends to GCT logger
func (l Logger) Write(p []byte) (n int, err error) {
log.Debugf(log.DatabaseMgr, "SQL: %s", p)
return 0, nil
}

View File

@@ -0,0 +1,54 @@
package database
import (
"database/sql"
"errors"
"path/filepath"
"sync"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
)
// Db holds all information for a database instance
type Db struct {
SQL *sql.DB
DataPath string
Config *Config
Connected bool
Mu sync.RWMutex
}
// Config holds all database configurable options including enable/disabled & DSN settings
type Config struct {
Enabled bool `json:"enabled"`
Verbose bool `json:"verbose"`
Driver string `json:"driver"`
drivers.ConnectionDetails `json:"connectionDetails"`
}
var (
// DB Global Database Connection
DB = &Db{}
// MigrationDir which folder to look in for current migrations
MigrationDir = filepath.Join("..", "..", "database", "migrations")
// ErrNoDatabaseProvided error to display when no database is provided
ErrNoDatabaseProvided = errors.New("no database provided")
// SupportedDrivers slice of supported database driver types
SupportedDrivers = []string{DBSQLite, DBSQLite3, DBPostgreSQL}
// DefaultSQLiteDatabase is the default sqlite3 database name to use
DefaultSQLiteDatabase = "gocryptotrader.db"
)
const (
// DBSQLite const string for sqlite across code base
DBSQLite = "sqlite"
// DBSQLite3 const string for sqlite3 across code base
DBSQLite3 = "sqlite3"
// DBPostgreSQL const string for PostgreSQL across code base
DBPostgreSQL = "postgres"
)

View File

@@ -1,41 +0,0 @@
package database
import (
"errors"
"sync"
"github.com/jmoiron/sqlx"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
)
// Database holds a pointer to sql connection, DataPath which is used for file based databases
// and a pointer to a Config struct
type Database struct {
Config *Config
DataPath string
SQL *sqlx.DB
Connected bool
Mu sync.RWMutex
}
// Config holds connection information about the database what the driver type is and if its enabled or not
type Config struct {
Enabled bool `json:"enabled"`
Driver string `json:"driver"`
drivers.ConnectionDetails `json:"connectionDetails"`
}
// Conn is a global copy of Database{} struct
var Conn = &Database{}
var (
// ErrNoDatabaseProvided error to display when no database is provided
ErrNoDatabaseProvided = errors.New("no database provided")
// SupportedDrivers slice of supported database driver types
SupportedDrivers = []string{"sqlite", "postgres"}
// DefaultSQLiteDatabase is the default sqlite database name to use
DefaultSQLiteDatabase = "gocryptotrader.db"
)

View File

@@ -2,10 +2,10 @@ package drivers
// ConnectionDetails holds DSN information
type ConnectionDetails struct {
Host string
Port uint16
Username string
Password string
Database string
SSLMode string
Host string `json:"host"`
Port uint16 `json:"port"`
Username string `json:"username"`
Password string `json:"password"`
Database string `json:"database"`
SSLMode string `json:"sslmode"`
}

View File

@@ -0,0 +1,43 @@
package postgres
import (
"database/sql"
"fmt"
"time"
// import go libpq driver package
_ "github.com/lib/pq"
"github.com/thrasher-corp/gocryptotrader/database"
)
// Connect opens a connection to Postgres database and returns a pointer to database.DB
func Connect() (*database.Db, error) {
if database.DB.Config.SSLMode == "" {
database.DB.Config.SSLMode = "disable"
}
configDSN := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=%s",
database.DB.Config.Username,
database.DB.Config.Password,
database.DB.Config.Host,
database.DB.Config.Port,
database.DB.Config.Database,
database.DB.Config.SSLMode)
db, err := sql.Open(database.DBPostgreSQL, configDSN)
if err != nil {
return nil, err
}
err = db.Ping()
if err != nil {
return nil, err
}
database.DB.SQL = db
database.DB.SQL.SetMaxOpenConns(2)
database.DB.SQL.SetMaxIdleConns(1)
database.DB.SQL.SetConnMaxLifetime(time.Hour)
return database.DB, nil
}

View File

@@ -1,41 +0,0 @@
package postgres
import (
"fmt"
"time"
"github.com/jackc/pgx"
"github.com/jackc/pgx/stdlib"
"github.com/jmoiron/sqlx"
"github.com/thrasher-corp/gocryptotrader/database"
)
// Connect establishes a connection pool to the database
func Connect() (*database.Database, error) {
configDSN := fmt.Sprintf("host=%s port=%d user=%s password=%s database=%s sslmode=%s",
database.Conn.Config.Host,
database.Conn.Config.Port,
database.Conn.Config.Username,
database.Conn.Config.Password,
database.Conn.Config.Database,
database.Conn.Config.SSLMode)
connConfig, err := pgx.ParseDSN(configDSN)
if err != nil {
return nil, err
}
connPool, err := pgx.NewConnPool(pgx.ConnPoolConfig{
ConnConfig: connConfig,
AfterConnect: nil,
MaxConnections: 20,
AcquireTimeout: 30 * time.Second,
})
if err != nil {
return nil, err
}
sqlxDB := stdlib.OpenDBFromPool(connPool)
database.Conn.SQL = sqlx.NewDb(sqlxDB, "pgx")
return database.Conn, nil
}

View File

@@ -1,28 +0,0 @@
package sqlite
import (
"path/filepath"
"github.com/jmoiron/sqlx"
// import sqlite3 driver
_ "github.com/mattn/go-sqlite3"
"github.com/thrasher-corp/gocryptotrader/database"
)
// Connect creates a connection to the entered database
// With SQLite the database is not created until first read/write
func Connect() (*database.Database, error) {
if database.Conn.Config.Database == "" {
return nil, database.ErrNoDatabaseProvided
}
databaseFullLocation := filepath.Join(database.Conn.DataPath, database.Conn.Config.Database)
dbConn, err := sqlx.Open("sqlite3", databaseFullLocation)
if err != nil {
return nil, err
}
database.Conn.SQL = dbConn
return database.Conn, nil
}

View File

@@ -0,0 +1,29 @@
package sqlite
import (
"database/sql"
"path/filepath"
// import sqlite3 driver
_ "github.com/mattn/go-sqlite3"
"github.com/thrasher-corp/gocryptotrader/database"
)
// Connect opens a connection to sqlite database and returns a pointer to database.DB
func Connect() (*database.Db, error) {
if database.DB.Config.Database == "" {
return nil, database.ErrNoDatabaseProvided
}
databaseFullLocation := filepath.Join(database.DB.DataPath, database.DB.Config.Database)
dbConn, err := sql.Open("sqlite3", databaseFullLocation)
if err != nil {
return nil, err
}
database.DB.SQL = dbConn
database.DB.SQL.SetMaxOpenConns(1)
return database.DB, nil
}

View File

@@ -1,180 +0,0 @@
package migrations
import (
"bytes"
"database/sql"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
)
// LoadMigrations will load all migrations in the ./database/migration/migrations folder
func (m *Migrator) LoadMigrations() error {
flag.Visit(func(f *flag.Flag) {
if f.Name == "migrationdir" {
MigrationDir = flag.Lookup("migrationdir").Value.String()
}
})
m.Log.Printf("Using migration folder %s\n", MigrationDir)
migration, err := filepath.Glob(MigrationDir + "/*.sql")
if err != nil {
return errors.New("failed to load migrations")
}
if len(migration) == 0 {
return errors.New("no migration files found")
}
sort.Strings(migration)
for x := range migration {
err = m.loadMigration(migration[x])
if err != nil {
return err
}
}
return nil
}
func (m *Migrator) loadMigration(migration string) error {
file, err := os.Open(migration)
if err != nil {
return err
}
fileData := strings.Trim(file.Name(), MigrationDir)
fileSeq := strings.Split(fileData, "_")
seq, _ := strconv.Atoi(fileSeq[0])
b, err := ioutil.ReadAll(file)
if err != nil {
return err
}
up := bytes.Split(b, []byte("-- up"))
if len(up) == 1 {
return fmt.Errorf("invalid migration file %v", file.Name())
}
down := strings.Split(string(up[1]), "-- down")
temp := Migration{
Sequence: seq,
UpSQL: down[0],
DownSQL: down[1],
}
m.Migrations = append(m.Migrations, temp)
return nil
}
// RunMigration attempts to run current migrations against a database
func (m *Migrator) RunMigration() (err error) {
v, err := m.getCurrentVersion()
if err != nil {
return
}
m.Log.Printf("Current database version: %v\n", v)
latestSeq := m.Migrations[len(m.Migrations)-1].Sequence
if v > latestSeq {
return errors.New("current database version is greater than latest migration halting further migrations")
}
if v == latestSeq {
m.Log.Println("no migrations to be run")
return
}
tx, err := m.Conn.SQL.Begin()
if err != nil {
return
}
for y := 0; y < len(m.Migrations); y++ {
if m.Migrations[y].Sequence <= v {
continue
}
err = m.txBegin(tx, m.checkConvert(m.Migrations[y].UpSQL))
if err != nil {
return tx.Rollback()
}
_, err = tx.Exec("update version set version=$1", m.Migrations[y].Sequence)
if err != nil {
return tx.Rollback()
}
}
err = tx.Commit()
if err != nil {
return tx.Rollback()
}
m.Log.Println("Migration completed")
m.Log.Printf("New database version: %v\n", latestSeq)
return nil
}
// txBegin executes a single migration statement inside tx. On failure it
// logs the error, rolls the transaction back (best effort) and returns
// the execution error. The previous implementation returned the result
// of tx.Rollback() instead — nil on a successful rollback — which made
// callers believe the statement succeeded.
func (m *Migrator) txBegin(tx *sql.Tx, input string) error {
	if _, err := tx.Exec(input); err != nil {
		m.Log.Errorf("%v", err)
		if rbErr := tx.Rollback(); rbErr != nil {
			m.Log.Errorf("rollback failed: %v", rbErr)
		}
		return err
	}
	return nil
}
// getCurrentVersion ensures the version bookkeeping table exists and
// then reads the schema version currently stored in it.
func (m *Migrator) getCurrentVersion() (int, error) {
	if err := m.checkVersionTableExists(); err != nil {
		return 0, err
	}
	var version int
	err := m.Conn.SQL.QueryRow("select version from version").Scan(&version)
	return version, err
}
// checkVersionTableExists creates the single-row version bookkeeping
// table if it is missing and seeds it with version 0. The DDL is passed
// through checkConvert so it runs on SQLite as well as PostgreSQL.
func (m *Migrator) checkVersionTableExists() error {
	const query = `
	CREATE TABLE IF NOT EXISTS version(
		version int not null
	);
	INSERT INTO version SELECT 0 WHERE 0=(SELECT COUNT(*) from version);
	`
	_, err := m.Conn.SQL.Exec(m.checkConvert(query))
	return err
}
// checkConvert rewrites a PSQL-flavoured query into its SQLite
// equivalent when the connection's driver is SQLite; queries for all
// other drivers are returned untouched.
// NOTE(review): the driver is compared against "sqlite" while other
// parts of the codebase were renamed to "sqlite3" — confirm which value
// the config actually stores.
func (m *Migrator) checkConvert(input string) string {
	if m.Conn.Config.Driver != "sqlite" {
		return input
	}
	// Common PSQL -> SQLITE conversion.
	// "integer" is mapped to itself ahead of "int" so the rewrite is
	// idempotent: NewReplacer tries patterns in argument order, and
	// without the self-map an existing "integer" would be mangled into
	// "integereger" by the shorter "int" rule.
	// TODO: Find a better way to handle this list
	r := strings.NewReplacer(
		"bigserial", "integer",
		"integer", "integer",
		"int", "integer",
		"now()", "CURRENT_TIMESTAMP")
	return r.Replace(input)
}

View File

@@ -1,37 +0,0 @@
package migrations
import (
"path/filepath"
"github.com/thrasher-corp/gocryptotrader/database"
)
var (
	// MigrationDir is the default folder searched for migration files to apply.
	MigrationDir = filepath.Join("./database", "migration", "migrations")
)

// Migration holds all information parsed from a single migration file:
// the Sequence (version) number and the SQL queries to run on up & down.
type Migration struct {
	Sequence int    // numeric filename prefix; determines apply order
	Name     string // migration file name (informational)
	UpSQL    string // SQL executed when applying the migration
	DownSQL  string // SQL executed when rolling the migration back
}

// Migrator holds a pointer to the database struct, the slice of loaded
// Migrations and the logger used for progress/error output.
type Migrator struct {
	Conn       *database.Database
	Migrations []Migration
	Log        Logger
}

// Logger is the logging interface the Migrator writes through.
// Allows you to BYO Logging/Printing.
type Logger interface {
	Printf(format string, v ...interface{})
	Println(v ...interface{})
	Errorf(format string, v ...interface{})
}

View File

@@ -1,25 +0,0 @@
package migrations
import (
log "github.com/thrasher-corp/gocryptotrader/logger"
)
// MLogger adapts the gocryptotrader logger package to the migrations
// Logger interface.
type MLogger struct{}

// Printf implementation of migration Logger interface
// Passes off to log.Infof
func (t MLogger) Printf(format string, v ...interface{}) {
	log.Infof(log.DatabaseMgr, format, v...)
}

// Println implementation of migration Logger interface
// Passes off to log.Infoln
func (t MLogger) Println(v ...interface{}) {
	log.Infoln(log.DatabaseMgr, v...)
}

// Errorf implementation of migration Logger interface
// Passes off to log.Errorf
func (t MLogger) Errorf(format string, v ...interface{}) {
	log.Errorf(log.DatabaseMgr, format, v...)
}

View File

@@ -1,11 +0,0 @@
-- Legacy migration format for the custom Migrator: the marker lines
-- below separate the SQL applied on upgrade from the SQL applied on
-- rollback. Text above the first marker is ignored by the parser.
-- up
CREATE TABLE IF NOT EXISTS audit_event
(
    id bigserial PRIMARY KEY NOT NULL,
    Type varchar(255) NOT NULL,
    Identifier varchar(255) NOT NULL,
    Message text NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT now()
);
-- down
DROP TABLE audit_event;

View File

@@ -0,0 +1,13 @@
-- +goose Up
-- SQL in this section is executed when the migration is applied.
-- audit_event stores engine audit entries; created_at is normalised to
-- UTC at insert time via (now() at time zone 'utc').
CREATE TABLE IF NOT EXISTS audit_event
(
    id bigserial PRIMARY KEY NOT NULL,
    type varchar(255) NOT NULL,
    identifier varchar(255) NOT NULL,
    message text NOT NULL,
    created_at TIMESTAMP NOT NULL DEFAULT (now() at time zone 'utc')
);
-- +goose Down
-- SQL in this section is executed when the migration is rolled back.
DROP TABLE audit_event;

View File

@@ -0,0 +1,13 @@
-- +goose Up
-- SQL in this section is executed when the migration is applied.
-- SQLite counterpart of the PostgreSQL audit_event migration.
-- "id integer not null primary key" makes id an alias for SQLite's
-- rowid, giving auto-assigned ids equivalent to bigserial.
CREATE TABLE "audit_event" (
    id integer not null primary key,
    type text not null,
    identifier text not null,
    message text not null,
    created_at timestamp not null default CURRENT_TIMESTAMP
);
-- +goose Down
-- SQL in this section is executed when the migration is rolled back.
DROP TABLE audit_event;

View File

@@ -1,8 +0,0 @@
package models
// AuditEvent is a model of how the data is represented in a database
type AuditEvent struct {
	Type       string // maps to the audit_event "type" column
	Identifier string // maps to the audit_event "identifier" column
	Message    string // maps to the audit_event "message" column
}

View File

@@ -0,0 +1,925 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"context"
"database/sql"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/queries/qm"
"github.com/thrasher-corp/sqlboiler/queries/qmhelper"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
// AuditEvent is an object representing the database table.
type AuditEvent struct {
ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"`
Type string `boil:"type" json:"type" toml:"type" yaml:"type"`
Identifier string `boil:"identifier" json:"identifier" toml:"identifier" yaml:"identifier"`
Message string `boil:"message" json:"message" toml:"message" yaml:"message"`
CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"`
R *auditEventR `boil:"-" json:"-" toml:"-" yaml:"-"`
L auditEventL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
var AuditEventColumns = struct {
ID string
Type string
Identifier string
Message string
CreatedAt string
}{
ID: "id",
Type: "type",
Identifier: "identifier",
Message: "message",
CreatedAt: "created_at",
}
// Generated where
type whereHelperint64 struct{ field string }
func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
type whereHelperstring struct{ field string }
func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
func (w whereHelperstring) IN(slice []string) qm.QueryMod {
values := make([]interface{}, 0, len(slice))
for _, value := range slice {
values = append(values, value)
}
return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
}
type whereHelpertime_Time struct{ field string }
func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.EQ, x)
}
func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.NEQ, x)
}
func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GTE, x)
}
var AuditEventWhere = struct {
ID whereHelperint64
Type whereHelperstring
Identifier whereHelperstring
Message whereHelperstring
CreatedAt whereHelpertime_Time
}{
ID: whereHelperint64{field: "\"audit_event\".\"id\""},
Type: whereHelperstring{field: "\"audit_event\".\"type\""},
Identifier: whereHelperstring{field: "\"audit_event\".\"identifier\""},
Message: whereHelperstring{field: "\"audit_event\".\"message\""},
CreatedAt: whereHelpertime_Time{field: "\"audit_event\".\"created_at\""},
}
// AuditEventRels is where relationship names are stored.
var AuditEventRels = struct {
}{}
// auditEventR is where relationships are stored.
type auditEventR struct {
}
// NewStruct creates a new relationship struct
func (*auditEventR) NewStruct() *auditEventR {
return &auditEventR{}
}
// auditEventL is where Load methods for each relationship are stored.
type auditEventL struct{}
var (
auditEventAllColumns = []string{"id", "type", "identifier", "message", "created_at"}
auditEventColumnsWithoutDefault = []string{"type", "identifier", "message"}
auditEventColumnsWithDefault = []string{"id", "created_at"}
auditEventPrimaryKeyColumns = []string{"id"}
)
type (
// AuditEventSlice is an alias for a slice of pointers to AuditEvent.
// This should generally be used opposed to []AuditEvent.
AuditEventSlice []*AuditEvent
// AuditEventHook is the signature for custom AuditEvent hook methods
AuditEventHook func(context.Context, boil.ContextExecutor, *AuditEvent) error
auditEventQuery struct {
*queries.Query
}
)
// Cache for insert, update and upsert
var (
auditEventType = reflect.TypeOf(&AuditEvent{})
auditEventMapping = queries.MakeStructMapping(auditEventType)
auditEventPrimaryKeyMapping, _ = queries.BindMapping(auditEventType, auditEventMapping, auditEventPrimaryKeyColumns)
auditEventInsertCacheMut sync.RWMutex
auditEventInsertCache = make(map[string]insertCache)
auditEventUpdateCacheMut sync.RWMutex
auditEventUpdateCache = make(map[string]updateCache)
auditEventUpsertCacheMut sync.RWMutex
auditEventUpsertCache = make(map[string]insertCache)
)
var (
// Force time package dependency for automated UpdatedAt/CreatedAt.
_ = time.Second
// Force qmhelper dependency for where clause generation (which doesn't
// always happen)
_ = qmhelper.Where
)
var auditEventBeforeInsertHooks []AuditEventHook
var auditEventBeforeUpdateHooks []AuditEventHook
var auditEventBeforeDeleteHooks []AuditEventHook
var auditEventBeforeUpsertHooks []AuditEventHook
var auditEventAfterInsertHooks []AuditEventHook
var auditEventAfterSelectHooks []AuditEventHook
var auditEventAfterUpdateHooks []AuditEventHook
var auditEventAfterDeleteHooks []AuditEventHook
var auditEventAfterUpsertHooks []AuditEventHook
// doBeforeInsertHooks executes all "before insert" hooks.
func (o *AuditEvent) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventBeforeInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *AuditEvent) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventBeforeUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *AuditEvent) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventBeforeDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *AuditEvent) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventBeforeUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterInsertHooks executes all "after Insert" hooks.
func (o *AuditEvent) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventAfterInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterSelectHooks executes all "after Select" hooks.
func (o *AuditEvent) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventAfterSelectHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpdateHooks executes all "after Update" hooks.
func (o *AuditEvent) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventAfterUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *AuditEvent) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventAfterDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *AuditEvent) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range auditEventAfterUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// AddAuditEventHook registers your hook function for all future operations.
func AddAuditEventHook(hookPoint boil.HookPoint, auditEventHook AuditEventHook) {
switch hookPoint {
case boil.BeforeInsertHook:
auditEventBeforeInsertHooks = append(auditEventBeforeInsertHooks, auditEventHook)
case boil.BeforeUpdateHook:
auditEventBeforeUpdateHooks = append(auditEventBeforeUpdateHooks, auditEventHook)
case boil.BeforeDeleteHook:
auditEventBeforeDeleteHooks = append(auditEventBeforeDeleteHooks, auditEventHook)
case boil.BeforeUpsertHook:
auditEventBeforeUpsertHooks = append(auditEventBeforeUpsertHooks, auditEventHook)
case boil.AfterInsertHook:
auditEventAfterInsertHooks = append(auditEventAfterInsertHooks, auditEventHook)
case boil.AfterSelectHook:
auditEventAfterSelectHooks = append(auditEventAfterSelectHooks, auditEventHook)
case boil.AfterUpdateHook:
auditEventAfterUpdateHooks = append(auditEventAfterUpdateHooks, auditEventHook)
case boil.AfterDeleteHook:
auditEventAfterDeleteHooks = append(auditEventAfterDeleteHooks, auditEventHook)
case boil.AfterUpsertHook:
auditEventAfterUpsertHooks = append(auditEventAfterUpsertHooks, auditEventHook)
}
}
// One returns a single auditEvent record from the query.
func (q auditEventQuery) One(ctx context.Context, exec boil.ContextExecutor) (*AuditEvent, error) {
o := &AuditEvent{}
queries.SetLimit(q.Query, 1)
err := q.Bind(ctx, exec, o)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows {
return nil, sql.ErrNoRows
}
return nil, errors.Wrap(err, "postgres: failed to execute a one query for audit_event")
}
if err := o.doAfterSelectHooks(ctx, exec); err != nil {
return o, err
}
return o, nil
}
// All returns all AuditEvent records from the query.
func (q auditEventQuery) All(ctx context.Context, exec boil.ContextExecutor) (AuditEventSlice, error) {
var o []*AuditEvent
err := q.Bind(ctx, exec, &o)
if err != nil {
return nil, errors.Wrap(err, "postgres: failed to assign all query results to AuditEvent slice")
}
if len(auditEventAfterSelectHooks) != 0 {
for _, obj := range o {
if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
return o, err
}
}
}
return o, nil
}
// Count returns the count of all AuditEvent records in the query.
func (q auditEventQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
var count int64
queries.SetSelect(q.Query, nil)
queries.SetCount(q.Query)
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
if err != nil {
return 0, errors.Wrap(err, "postgres: failed to count audit_event rows")
}
return count, nil
}
// Exists checks if the row exists in the table.
func (q auditEventQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
var count int64
queries.SetSelect(q.Query, nil)
queries.SetCount(q.Query)
queries.SetLimit(q.Query, 1)
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
if err != nil {
return false, errors.Wrap(err, "postgres: failed to check if audit_event exists")
}
return count > 0, nil
}
// AuditEvents retrieves all the records using an executor.
func AuditEvents(mods ...qm.QueryMod) auditEventQuery {
mods = append(mods, qm.From("\"audit_event\""))
return auditEventQuery{NewQuery(mods...)}
}
// FindAuditEvent retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindAuditEvent(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*AuditEvent, error) {
auditEventObj := &AuditEvent{}
sel := "*"
if len(selectCols) > 0 {
sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
}
query := fmt.Sprintf(
"select %s from \"audit_event\" where \"id\"=$1", sel,
)
q := queries.Raw(query, iD)
err := q.Bind(ctx, exec, auditEventObj)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows {
return nil, sql.ErrNoRows
}
return nil, errors.Wrap(err, "postgres: unable to select from audit_event")
}
return auditEventObj, nil
}
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *AuditEvent) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
if o == nil {
return errors.New("postgres: no audit_event provided for insertion")
}
var err error
if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
return err
}
nzDefaults := queries.NonZeroDefaultSet(auditEventColumnsWithDefault, o)
key := makeCacheKey(columns, nzDefaults)
auditEventInsertCacheMut.RLock()
cache, cached := auditEventInsertCache[key]
auditEventInsertCacheMut.RUnlock()
if !cached {
wl, returnColumns := columns.InsertColumnSet(
auditEventAllColumns,
auditEventColumnsWithDefault,
auditEventColumnsWithoutDefault,
nzDefaults,
)
cache.valueMapping, err = queries.BindMapping(auditEventType, auditEventMapping, wl)
if err != nil {
return err
}
cache.retMapping, err = queries.BindMapping(auditEventType, auditEventMapping, returnColumns)
if err != nil {
return err
}
if len(wl) != 0 {
cache.query = fmt.Sprintf("INSERT INTO \"audit_event\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
} else {
cache.query = "INSERT INTO \"audit_event\" %sDEFAULT VALUES%s"
}
var queryOutput, queryReturning string
if len(cache.retMapping) != 0 {
queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
}
cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
}
value := reflect.Indirect(reflect.ValueOf(o))
vals := queries.ValuesFromMapping(value, cache.valueMapping)
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, cache.query)
fmt.Fprintln(boil.DebugWriter, vals)
}
if len(cache.retMapping) != 0 {
err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
} else {
_, err = exec.ExecContext(ctx, cache.query, vals...)
}
if err != nil {
return errors.Wrap(err, "postgres: unable to insert into audit_event")
}
if !cached {
auditEventInsertCacheMut.Lock()
auditEventInsertCache[key] = cache
auditEventInsertCacheMut.Unlock()
}
return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the AuditEvent.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *AuditEvent) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
var err error
if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
return 0, err
}
key := makeCacheKey(columns, nil)
auditEventUpdateCacheMut.RLock()
cache, cached := auditEventUpdateCache[key]
auditEventUpdateCacheMut.RUnlock()
if !cached {
wl := columns.UpdateColumnSet(
auditEventAllColumns,
auditEventPrimaryKeyColumns,
)
if len(wl) == 0 {
return 0, errors.New("postgres: unable to update audit_event, could not build whitelist")
}
cache.query = fmt.Sprintf("UPDATE \"audit_event\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 1, wl),
strmangle.WhereClause("\"", "\"", len(wl)+1, auditEventPrimaryKeyColumns),
)
cache.valueMapping, err = queries.BindMapping(auditEventType, auditEventMapping, append(wl, auditEventPrimaryKeyColumns...))
if err != nil {
return 0, err
}
}
values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, cache.query)
fmt.Fprintln(boil.DebugWriter, values)
}
var result sql.Result
result, err = exec.ExecContext(ctx, cache.query, values...)
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to update audit_event row")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "postgres: failed to get rows affected by update for audit_event")
}
if !cached {
auditEventUpdateCacheMut.Lock()
auditEventUpdateCache[key] = cache
auditEventUpdateCacheMut.Unlock()
}
return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
func (q auditEventQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
queries.SetUpdate(q.Query, cols)
result, err := q.Query.ExecContext(ctx, exec)
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to update all for audit_event")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to retrieve rows affected for audit_event")
}
return rowsAff, nil
}
// UpdateAll updates all rows with the specified column values, using an executor.
func (o AuditEventSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
ln := int64(len(o))
if ln == 0 {
return 0, nil
}
if len(cols) == 0 {
return 0, errors.New("postgres: update all requires at least one column argument")
}
colNames := make([]string, len(cols))
args := make([]interface{}, len(cols))
i := 0
for name, value := range cols {
colNames[i] = name
args[i] = value
i++
}
// Append all of the primary key values for each column
for _, obj := range o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), auditEventPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := fmt.Sprintf("UPDATE \"audit_event\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 1, colNames),
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, auditEventPrimaryKeyColumns, len(o)))
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
fmt.Fprintln(boil.DebugWriter, args...)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to update all in auditEvent slice")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to retrieve rows affected all in update all auditEvent")
}
return rowsAff, nil
}
// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
func (o *AuditEvent) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error {
if o == nil {
return errors.New("postgres: no audit_event provided for upsert")
}
if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
return err
}
nzDefaults := queries.NonZeroDefaultSet(auditEventColumnsWithDefault, o)
// Build cache key in-line uglily - mysql vs psql problems
buf := strmangle.GetBuffer()
if updateOnConflict {
buf.WriteByte('t')
} else {
buf.WriteByte('f')
}
buf.WriteByte('.')
for _, c := range conflictColumns {
buf.WriteString(c)
}
buf.WriteByte('.')
buf.WriteString(strconv.Itoa(updateColumns.Kind))
for _, c := range updateColumns.Cols {
buf.WriteString(c)
}
buf.WriteByte('.')
buf.WriteString(strconv.Itoa(insertColumns.Kind))
for _, c := range insertColumns.Cols {
buf.WriteString(c)
}
buf.WriteByte('.')
for _, c := range nzDefaults {
buf.WriteString(c)
}
key := buf.String()
strmangle.PutBuffer(buf)
auditEventUpsertCacheMut.RLock()
cache, cached := auditEventUpsertCache[key]
auditEventUpsertCacheMut.RUnlock()
var err error
if !cached {
insert, ret := insertColumns.InsertColumnSet(
auditEventAllColumns,
auditEventColumnsWithDefault,
auditEventColumnsWithoutDefault,
nzDefaults,
)
update := updateColumns.UpdateColumnSet(
auditEventAllColumns,
auditEventPrimaryKeyColumns,
)
if updateOnConflict && len(update) == 0 {
return errors.New("postgres: unable to upsert audit_event, could not build update column list")
}
conflict := conflictColumns
if len(conflict) == 0 {
conflict = make([]string, len(auditEventPrimaryKeyColumns))
copy(conflict, auditEventPrimaryKeyColumns)
}
cache.query = buildUpsertQueryPostgres(dialect, "\"audit_event\"", updateOnConflict, ret, update, conflict, insert)
cache.valueMapping, err = queries.BindMapping(auditEventType, auditEventMapping, insert)
if err != nil {
return err
}
if len(ret) != 0 {
cache.retMapping, err = queries.BindMapping(auditEventType, auditEventMapping, ret)
if err != nil {
return err
}
}
}
value := reflect.Indirect(reflect.ValueOf(o))
vals := queries.ValuesFromMapping(value, cache.valueMapping)
var returns []interface{}
if len(cache.retMapping) != 0 {
returns = queries.PtrsFromMapping(value, cache.retMapping)
}
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, cache.query)
fmt.Fprintln(boil.DebugWriter, vals)
}
if len(cache.retMapping) != 0 {
err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
if err == sql.ErrNoRows {
err = nil // Postgres doesn't return anything when there's no update
}
} else {
_, err = exec.ExecContext(ctx, cache.query, vals...)
}
if err != nil {
return errors.Wrap(err, "postgres: unable to upsert audit_event")
}
if !cached {
auditEventUpsertCacheMut.Lock()
auditEventUpsertCache[key] = cache
auditEventUpsertCacheMut.Unlock()
}
return o.doAfterUpsertHooks(ctx, exec)
}
// Delete deletes a single AuditEvent record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *AuditEvent) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if o == nil {
return 0, errors.New("postgres: no AuditEvent provided for delete")
}
if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
return 0, err
}
args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), auditEventPrimaryKeyMapping)
sql := "DELETE FROM \"audit_event\" WHERE \"id\"=$1"
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
fmt.Fprintln(boil.DebugWriter, args...)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to delete from audit_event")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "postgres: failed to get rows affected by delete for audit_event")
}
if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
return 0, err
}
return rowsAff, nil
}
// DeleteAll deletes all matching rows.
func (q auditEventQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if q.Query == nil {
return 0, errors.New("postgres: no auditEventQuery provided for delete all")
}
queries.SetDelete(q.Query)
result, err := q.Query.ExecContext(ctx, exec)
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to delete all from audit_event")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "postgres: failed to get rows affected by deleteall for audit_event")
}
return rowsAff, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
func (o AuditEventSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if len(o) == 0 {
return 0, nil
}
if len(auditEventBeforeDeleteHooks) != 0 {
for _, obj := range o {
if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
return 0, err
}
}
}
var args []interface{}
for _, obj := range o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), auditEventPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := "DELETE FROM \"audit_event\" WHERE " +
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, auditEventPrimaryKeyColumns, len(o))
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
fmt.Fprintln(boil.DebugWriter, args)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "postgres: unable to delete all from auditEvent slice")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "postgres: failed to get rows affected by deleteall for audit_event")
}
if len(auditEventAfterDeleteHooks) != 0 {
for _, obj := range o {
if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
return 0, err
}
}
}
return rowsAff, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *AuditEvent) Reload(ctx context.Context, exec boil.ContextExecutor) error {
ret, err := FindAuditEvent(ctx, exec, o.ID)
if err != nil {
return err
}
*o = *ret
return nil
}
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *AuditEventSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
if o == nil || len(*o) == 0 {
return nil
}
slice := AuditEventSlice{}
var args []interface{}
for _, obj := range *o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), auditEventPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := "SELECT \"audit_event\".* FROM \"audit_event\" WHERE " +
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, auditEventPrimaryKeyColumns, len(*o))
q := queries.Raw(sql, args...)
err := q.Bind(ctx, exec, &slice)
if err != nil {
return errors.Wrap(err, "postgres: unable to reload all in AuditEventSlice")
}
*o = slice
return nil
}
// AuditEventExists checks if the AuditEvent row exists.
func AuditEventExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
var exists bool
sql := "select exists(select 1 from \"audit_event\" where \"id\"=$1 limit 1)"
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
fmt.Fprintln(boil.DebugWriter, iD)
}
row := exec.QueryRowContext(ctx, sql, iD)
err := row.Scan(&exists)
if err != nil {
return false, errors.Wrap(err, "postgres: unable to check if audit_event exists")
}
return exists, nil
}

View File

@@ -0,0 +1,732 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
func testAuditEvents(t *testing.T) {
t.Parallel()
query := AuditEvents()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
func testAuditEventsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &AuditEvent{}
if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize AuditEvent struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := AuditEvents().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testAuditEventsQueryDeleteAll inserts one row and checks that the
// query-level DeleteAll removes it.
func testAuditEventsQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	// Rolled-back transaction keeps the test database clean.
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := AuditEvents().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testAuditEventsSliceDeleteAll inserts one row and checks that
// slice-level DeleteAll removes it.
func testAuditEventsSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	// Rolled-back transaction keeps the test database clean.
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := AuditEventSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testAuditEventsExists inserts one row and checks AuditEventExists
// reports true for its primary key.
func testAuditEventsExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := AuditEventExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if AuditEvent exists: %s", err)
	}
	if !e {
		t.Errorf("Expected AuditEventExists to return true, but got false.")
	}
}
// testAuditEventsFind inserts one row and checks FindAuditEvent
// retrieves a non-nil record by primary key.
func testAuditEventsFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	auditEventFound, err := FindAuditEvent(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if auditEventFound == nil {
		t.Error("want a record, got nil")
	}
}
// testAuditEventsBind inserts one row and checks query results can be
// bound back onto an AuditEvent struct.
func testAuditEventsBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = AuditEvents().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
// testAuditEventsOne inserts one row and checks One() returns a
// non-nil record.
func testAuditEventsOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := AuditEvents().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
// testAuditEventsAll inserts two rows and checks All() returns both.
func testAuditEventsAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	auditEventOne := &AuditEvent{}
	auditEventTwo := &AuditEvent{}
	if err = randomize.Struct(seed, auditEventOne, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}
	if err = randomize.Struct(seed, auditEventTwo, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = auditEventOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = auditEventTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := AuditEvents().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
// testAuditEventsCount inserts two rows and checks Count() reports 2.
func testAuditEventsCount(t *testing.T) {
	t.Parallel()

	var err error
	seed := randomize.NewSeed()
	auditEventOne := &AuditEvent{}
	auditEventTwo := &AuditEvent{}
	if err = randomize.Struct(seed, auditEventOne, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}
	if err = randomize.Struct(seed, auditEventTwo, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = auditEventOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = auditEventTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// The following hook stubs are registered by testAuditEventsHooks. Each one
// zeroes the passed object so the test can detect (via DeepEqual against an
// empty struct) that the corresponding hook was actually invoked.
func auditEventBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

func auditEventAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}
// testAuditEventsHooks registers each lifecycle hook in turn, fires the
// corresponding do*Hooks method and verifies the hook ran (the stub zeroes
// the object). The registered hook slice is reset after each check so hooks
// do not leak into later assertions or other tests.
func testAuditEventsHooks(t *testing.T) {
	t.Parallel()

	var err error

	ctx := context.Background()
	empty := &AuditEvent{}
	o := &AuditEvent{}

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, auditEventDBTypes, false); err != nil {
		t.Errorf("Unable to randomize AuditEvent object: %s", err)
	}

	AddAuditEventHook(boil.BeforeInsertHook, auditEventBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeInsertHooks = []AuditEventHook{}

	AddAuditEventHook(boil.AfterInsertHook, auditEventAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	auditEventAfterInsertHooks = []AuditEventHook{}

	AddAuditEventHook(boil.AfterSelectHook, auditEventAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	auditEventAfterSelectHooks = []AuditEventHook{}

	AddAuditEventHook(boil.BeforeUpdateHook, auditEventBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeUpdateHooks = []AuditEventHook{}

	AddAuditEventHook(boil.AfterUpdateHook, auditEventAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	auditEventAfterUpdateHooks = []AuditEventHook{}

	AddAuditEventHook(boil.BeforeDeleteHook, auditEventBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeDeleteHooks = []AuditEventHook{}

	AddAuditEventHook(boil.AfterDeleteHook, auditEventAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	auditEventAfterDeleteHooks = []AuditEventHook{}

	AddAuditEventHook(boil.BeforeUpsertHook, auditEventBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeUpsertHooks = []AuditEventHook{}

	AddAuditEventHook(boil.AfterUpsertHook, auditEventAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	auditEventAfterUpsertHooks = []AuditEventHook{}
}
// testAuditEventsInsert inserts one row with inferred columns and checks
// the table count becomes 1.
func testAuditEventsInsert(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testAuditEventsInsertWhitelist inserts one row using an explicit column
// whitelist (all non-default columns) and checks the count becomes 1.
func testAuditEventsInsertWhitelist(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Whitelist(auditEventColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testAuditEventsReload inserts one row and checks it can be reloaded
// from the database.
func testAuditEventsReload(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = o.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testAuditEventsReloadAll inserts one row and checks that a slice of
// records can be bulk-reloaded from the database.
func testAuditEventsReloadAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := AuditEventSlice{o}

	if err = slice.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testAuditEventsSelect inserts one row and checks All() returns exactly
// one record.
func testAuditEventsSelect(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := AuditEvents().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 1 {
		t.Error("want one record, got:", len(slice))
	}
}
var (
	// auditEventDBTypes maps struct field names to their PostgreSQL column
	// types; randomize.Struct uses it to generate type-appropriate values.
	auditEventDBTypes = map[string]string{`ID`: `bigint`, `Type`: `character varying`, `Identifier`: `character varying`, `Message`: `text`, `CreatedAt`: `timestamp with time zone`}
	_                 = bytes.MinRead
)
// testAuditEventsUpdate inserts a row, re-randomizes its non-key columns
// and checks Update affects exactly one row.
func testAuditEventsUpdate(t *testing.T) {
	t.Parallel()

	// Update is meaningless without a primary key or with nothing but keys.
	if 0 == len(auditEventPrimaryKeyColumns) {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(auditEventAllColumns) == len(auditEventPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Keep the primary key; randomize everything else before updating.
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testAuditEventsSliceUpdateAll inserts a row, builds an update map of all
// non-key columns via reflection over the struct's `boil` tags, then checks
// slice UpdateAll affects exactly one row.
func testAuditEventsSliceUpdateAll(t *testing.T) {
	t.Parallel()

	if len(auditEventAllColumns) == len(auditEventPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(auditEventAllColumns, auditEventPrimaryKeyColumns) {
		fields = auditEventAllColumns
	} else {
		fields = strmangle.SetComplement(
			auditEventAllColumns,
			auditEventPrimaryKeyColumns,
		)
	}

	// Match each remaining column name to its struct field through the
	// `boil` tag and collect the current value into the update map.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()

	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}

	slice := AuditEventSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}
// testAuditEventsUpsert exercises both halves of an upsert: first an
// insert (no conflict), then a conflicting upsert that must update in
// place, with the row count staying at 1 throughout.
func testAuditEventsUpsert(t *testing.T) {
	t.Parallel()

	if len(auditEventAllColumns) == len(auditEventPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	// Attempt the INSERT side of an UPSERT
	o := AuditEvent{}
	if err = randomize.Struct(seed, &o, auditEventDBTypes, true); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
		t.Errorf("Unable to upsert AuditEvent: %s", err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Attempt the UPDATE side of an UPSERT
	if err = randomize.Struct(seed, &o, auditEventDBTypes, false, auditEventPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
		t.Errorf("Unable to upsert AuditEvent: %s", err)
	}

	count, err = AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
}

View File

@@ -0,0 +1,119 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"database/sql"
"flag"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/spf13/viper"
"github.com/thrasher-corp/sqlboiler/boil"
)
// flagDebugMode turns on echoing of generated SQL statements.
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")

// flagConfigFile optionally points at an alternative sqlboiler config.
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")

// outputDirDepth is how many directories up from the generated package the
// repository root (and thus the sqlboiler config) lives.
const outputDirDepth = 3

var (
	// dbMain is the database-specific tester registered by an init()
	// in the driver-specific harness file (e.g. the pgTester).
	dbMain tester
)

// tester abstracts the per-database lifecycle used by TestMain:
// provision a test database, hand out a connection, clean up.
type tester interface {
	setup() error
	conn() (*sql.DB, error)
	teardown() error
}
// TestMain bootstraps the generated test suite: it loads configuration,
// provisions the test database through the registered tester, runs the
// tests against that connection, and tears the database down afterwards.
func TestMain(m *testing.M) {
	if dbMain == nil {
		fmt.Println("no dbMain tester interface was ready")
		os.Exit(-1)
	}

	rand.Seed(time.Now().UnixNano())
	flag.Parse()

	var err error

	// Load configuration
	err = initViper()
	if err != nil {
		fmt.Println("unable to load config file")
		os.Exit(-2)
	}

	// Set DebugMode so we can see generated sql statements
	boil.DebugMode = *flagDebugMode

	if err = dbMain.setup(); err != nil {
		fmt.Println("Unable to execute setup:", err)
		os.Exit(-4)
	}

	conn, err := dbMain.conn()
	if err != nil {
		// Previously execution continued here with a nil connection, which
		// made boil.SetDB store nil and the deferred teardown panic on a
		// nil *sql.DB. Abort instead, cleaning up what setup created.
		fmt.Println("failed to get connection:", err)
		if terr := dbMain.teardown(); terr != nil {
			fmt.Println("Unable to execute teardown:", terr)
		}
		os.Exit(-3)
	}

	var code int
	boil.SetDB(conn)
	code = m.Run()

	if err = dbMain.teardown(); err != nil {
		fmt.Println("Unable to execute teardown:", err)
		os.Exit(-5)
	}

	os.Exit(code)
}
// initViper locates and loads the sqlboiler configuration. An explicit
// -test.config path wins; otherwise it searches the repository root
// (outputDirDepth directories up) and the user's XDG/home config dirs,
// falling back to environment variables if no file is found.
func initViper() error {
	if flagConfigFile != nil && *flagConfigFile != "" {
		viper.SetConfigFile(*flagConfigFile)
		if err := viper.ReadInConfig(); err != nil {
			return err
		}
		return nil
	}

	var err error

	viper.SetConfigName("sqlboiler")

	configHome := os.Getenv("XDG_CONFIG_HOME")
	homePath := os.Getenv("HOME")
	wd, err := os.Getwd()
	if err != nil {
		// Best effort: assume we are inside the generated package and walk up.
		wd = strings.Repeat("../", outputDirDepth)
	} else {
		wd = wd + strings.Repeat("/..", outputDirDepth)
	}

	configPaths := []string{wd}
	if len(configHome) > 0 {
		configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
	} else {
		configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
	}

	for _, p := range configPaths {
		viper.AddConfigPath(p)
	}

	// Ignore errors here, fall back to defaults and validation to provide errs
	_ = viper.ReadInConfig()

	// Allow e.g. PSQL_DBNAME to override psql.dbname.
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	return nil
}

View File

@@ -0,0 +1,33 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"github.com/thrasher-corp/sqlboiler/drivers"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/queries/qm"
)
// dialect describes PostgreSQL to the query builder: double-quote (0x22)
// identifier quoting, $N index placeholders, RETURNING instead of
// LastInsertId, and no TOP/OUTPUT/CASE-WHEN-EXISTS clauses.
var dialect = drivers.Dialect{
	LQ: 0x22,
	RQ: 0x22,

	UseIndexPlaceholders:    true,
	UseLastInsertID:         false,
	UseSchema:               false,
	UseDefaultKeyword:       true,
	UseAutoColumns:          false,
	UseTopClause:            false,
	UseOutputClause:         false,
	UseCaseWhenExistsClause: false,
}

// NewQuery initializes a new Query using the passed in QueryMods
func NewQuery(mods ...qm.QueryMod) *queries.Query {
	q := &queries.Query{}
	queries.SetDialect(q, &dialect)
	qm.Apply(q, mods...)

	return q
}

View File

@@ -0,0 +1,52 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"regexp"
"github.com/thrasher-corp/sqlboiler/boil"
)
// dbNameRand seeds random test-database name generation.
var dbNameRand *rand.Rand

// MustTx unwraps a (transactor, error) pair, panicking if the transaction
// could not be started; used to keep the generated tests terse.
func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor {
	if err != nil {
		panic(fmt.Sprintf("Cannot create a transactor: %s", err))
	}

	return transactor
}
func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader {
return &fKeyDestroyer{
reader: reader,
rgx: regex,
}
}
type fKeyDestroyer struct {
reader io.Reader
buf *bytes.Buffer
rgx *regexp.Regexp
}
func (f *fKeyDestroyer) Read(b []byte) (int, error) {
if f.buf == nil {
all, err := ioutil.ReadAll(f.reader)
if err != nil {
return 0, err
}
all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1)
all = f.rgx.ReplaceAll(all, []byte{})
f.buf = bytes.NewBuffer(all)
}
return f.buf.Read(b)
}

View File

@@ -0,0 +1,121 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import "testing"
// This test suite runs each operation test in parallel.
// Example, if your database has 3 tables, the suite will run:
// table1, table2 and table3 Delete in parallel
// table1, table2 and table3 Insert in parallel, and so forth.
// It does NOT run each operation group in parallel.
// Separating the tests thusly grants avoidance of Postgres deadlocks.
// TestParent runs each table's basic query-construction test.
func TestParent(t *testing.T) {
	t.Run("AuditEvents", testAuditEvents)
}

func TestDelete(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsDelete)
}

func TestQueryDeleteAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsQueryDeleteAll)
}

func TestSliceDeleteAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsSliceDeleteAll)
}

func TestExists(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsExists)
}

func TestFind(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsFind)
}

func TestBind(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsBind)
}

func TestOne(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsOne)
}

func TestAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsAll)
}

func TestCount(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsCount)
}

func TestHooks(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsHooks)
}

func TestInsert(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsInsert)
	t.Run("AuditEvents", testAuditEventsInsertWhitelist)
}

// The relationship test groups below are intentionally empty: the schema
// has a single table with no foreign keys, so there is nothing to run.

// TestToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestToOne(t *testing.T) {}

// TestOneToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOne(t *testing.T) {}

// TestToMany tests cannot be run in parallel
// or deadlocks can occur.
func TestToMany(t *testing.T) {}

// TestToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneSet(t *testing.T) {}

// TestToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneRemove(t *testing.T) {}

// TestOneToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneSet(t *testing.T) {}

// TestOneToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneRemove(t *testing.T) {}

// TestToManyAdd tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyAdd(t *testing.T) {}

// TestToManySet tests cannot be run in parallel
// or deadlocks can occur.
func TestToManySet(t *testing.T) {}

// TestToManyRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyRemove(t *testing.T) {}

func TestReload(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsReload)
}

func TestReloadAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsReloadAll)
}

func TestSelect(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsSelect)
}

func TestUpdate(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsUpdate)
}

func TestSliceUpdateAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsSliceUpdateAll)
}

View File

@@ -0,0 +1,10 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
// TableNames holds the physical table names of the schema so callers can
// reference them without string literals.
var TableNames = struct {
	AuditEvent string
}{
	AuditEvent: "audit_event",
}

View File

@@ -0,0 +1,52 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"strconv"
"github.com/pkg/errors"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
// M type is for providing columns and column values to UpdateAll.
type M map[string]interface{}

// ErrSyncFail occurs during insert when the record could not be retrieved in
// order to populate default value information. This usually happens when LastInsertId
// fails or there was a primary key configuration that was not resolvable.
var ErrSyncFail = errors.New("postgres: failed to synchronize data after insert")

// insertCache memoizes the generated INSERT/RETURNING statements and the
// struct-field mappings for a given column set.
type insertCache struct {
	query        string
	retQuery     string
	valueMapping []uint64
	retMapping   []uint64
}

// updateCache memoizes the generated UPDATE statement and field mapping.
type updateCache struct {
	query        string
	valueMapping []uint64
}

// makeCacheKey builds a cache key from the column-set kind, the column
// names, and any non-zero default columns ('.'-separated) so distinct
// insert shapes get distinct cached statements.
func makeCacheKey(cols boil.Columns, nzDefaults []string) string {
	buf := strmangle.GetBuffer()

	buf.WriteString(strconv.Itoa(cols.Kind))
	for _, w := range cols.Cols {
		buf.WriteString(w)
	}
	if len(nzDefaults) != 0 {
		buf.WriteByte('.')
	}
	for _, nz := range nzDefaults {
		buf.WriteString(nz)
	}

	str := buf.String()
	strmangle.PutBuffer(buf)
	return str
}

View File

@@ -0,0 +1,243 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"bytes"
"database/sql"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"github.com/kat-co/vala"
_ "github.com/lib/pq"
"github.com/pkg/errors"
"github.com/spf13/viper"
"github.com/thrasher-corp/goose"
"github.com/thrasher-corp/sqlboiler/drivers/sqlboiler-psql/driver"
"github.com/thrasher-corp/sqlboiler/randomize"
)
// rgxPGFkey matches ALTER TABLE ... ADD CONSTRAINT ... FOREIGN KEY clauses
// in a pg_dump schema so they can be stripped before replay.
var rgxPGFkey = regexp.MustCompile(`(?m)^ALTER TABLE ONLY .*\n\s+ADD CONSTRAINT .*? FOREIGN KEY .*?;\n`)

// pgTester implements the tester interface for PostgreSQL: it clones the
// source database's schema into a disposable test database.
type pgTester struct {
	dbConn *sql.DB

	dbName     string
	host       string
	user       string
	pass       string
	sslmode    string
	port       int
	pgPassFile string

	testDBName string
	skipSQLCmd bool
}

// init registers the PostgreSQL tester as the suite's dbMain.
func init() {
	dbMain = &pgTester{}
}
// setup dumps the database schema and imports it into a temporary randomly
// generated test database so that tests can be run against it using the
// generated sqlboiler ORM package.
func (p *pgTester) setup() error {
	var err error

	viper.SetDefault("psql.schema", "public")
	viper.SetDefault("psql.port", 5432)
	viper.SetDefault("psql.sslmode", "require")

	p.dbName = viper.GetString("psql.dbname")
	p.host = viper.GetString("psql.host")
	p.user = viper.GetString("psql.user")
	p.pass = viper.GetString("psql.pass")
	p.port = viper.GetInt("psql.port")
	p.sslmode = viper.GetString("psql.sslmode")
	p.testDBName = viper.GetString("psql.testdbname")
	p.skipSQLCmd = viper.GetBool("psql.skipsqlcmd")

	// Fail fast if any required connection setting is missing.
	err = vala.BeginValidation().Validate(
		vala.StringNotEmpty(p.user, "psql.user"),
		vala.StringNotEmpty(p.host, "psql.host"),
		vala.Not(vala.Equals(p.port, 0, "psql.port")),
		vala.StringNotEmpty(p.dbName, "psql.dbname"),
		vala.StringNotEmpty(p.sslmode, "psql.sslmode"),
	).Check()
	if err != nil {
		return err
	}

	// if no testing DB passed
	if len(p.testDBName) == 0 {
		// Create a randomized db name.
		p.testDBName = randomize.StableDBName(p.dbName)
	}

	if err = p.makePGPassFile(); err != nil {
		return err
	}

	if !p.skipSQLCmd {
		if err = p.dropTestDB(); err != nil {
			return err
		}
		if err = p.createTestDB(); err != nil {
			return err
		}

		// Pipe pg_dump's schema-only output into psql against the fresh
		// test database, stripping FOREIGN KEY constraints on the way.
		dumpCmd := exec.Command("pg_dump", "--schema-only", p.dbName)
		dumpCmd.Env = append(os.Environ(), p.pgEnv()...)
		createCmd := exec.Command("psql", p.testDBName)
		createCmd.Env = append(os.Environ(), p.pgEnv()...)

		r, w := io.Pipe()
		dumpCmdStderr := &bytes.Buffer{}
		createCmdStderr := &bytes.Buffer{}

		dumpCmd.Stdout = w
		dumpCmd.Stderr = dumpCmdStderr

		createCmd.Stdin = newFKeyDestroyer(rgxPGFkey, r)
		createCmd.Stderr = createCmdStderr

		if err = dumpCmd.Start(); err != nil {
			return errors.Wrap(err, "failed to start pg_dump command")
		}
		if err = createCmd.Start(); err != nil {
			return errors.Wrap(err, "failed to start psql command")
		}

		if err = dumpCmd.Wait(); err != nil {
			fmt.Println(err)
			fmt.Println(dumpCmdStderr.String())
			return errors.Wrap(err, "failed to wait for pg_dump command")
		}

		_ = w.Close() // After dumpCmd is done, close the write end of the pipe

		if err = createCmd.Wait(); err != nil {
			fmt.Println(err)
			fmt.Println(createCmdStderr.String())
			return errors.Wrap(err, "failed to wait for psql command")
		}
	}

	return nil
}
// runCmd executes command with the tester's PG* environment, feeding it
// stdin when non-empty; on failure it prints the captured stdout/stderr
// for diagnosis and returns the error.
func (p *pgTester) runCmd(stdin, command string, args ...string) error {
	cmd := exec.Command(command, args...)
	cmd.Env = append(os.Environ(), p.pgEnv()...)

	if len(stdin) != 0 {
		cmd.Stdin = strings.NewReader(stdin)
	}

	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	if err := cmd.Run(); err != nil {
		fmt.Println("failed running:", command, args)
		fmt.Println(stdout.String())
		fmt.Println(stderr.String())
		return err
	}

	return nil
}
// pgEnv returns the PG* environment variables that point the postgres
// client tools at this tester's host/port/user and .pgpass file.
func (p *pgTester) pgEnv() []string {
	return []string{
		fmt.Sprintf("PGHOST=%s", p.host),
		fmt.Sprintf("PGPORT=%d", p.port),
		fmt.Sprintf("PGUSER=%s", p.user),
		fmt.Sprintf("PGPASSFILE=%s", p.pgPassFile),
	}
}
// makePGPassFile writes a temporary .pgpass file with one
// host:port:database:user[:password] entry for each database the harness
// may touch (the postgres maintenance DB, the source DB and the test DB),
// and records its path for pgEnv/teardown.
func (p *pgTester) makePGPassFile() error {
	tmp, err := ioutil.TempFile("", "pgpass")
	if err != nil {
		return errors.Wrap(err, "failed to create option file")
	}

	for _, database := range []string{"postgres", p.dbName, p.testDBName} {
		fmt.Fprintf(tmp, "%s:%d:%s:%s", p.host, p.port, database, p.user)
		if len(p.pass) != 0 {
			fmt.Fprintf(tmp, ":%s", p.pass)
		}
		fmt.Fprintln(tmp)
	}

	p.pgPassFile = tmp.Name()
	return tmp.Close()
}
// createTestDB creates the disposable test database via createdb.
func (p *pgTester) createTestDB() error {
	return p.runCmd("", "createdb", p.testDBName)
}

// dropTestDB removes the test database if present via dropdb.
func (p *pgTester) dropTestDB() error {
	return p.runCmd("", "dropdb", "--if-exists", p.testDBName)
}
// teardown executes cleanup tasks when the tests finish running: it closes
// the connection (if one was ever opened), drops the test database and
// removes the temporary .pgpass file.
func (p *pgTester) teardown() error {
	// Guard against a connection that was never established (e.g. setup
	// succeeded but conn() failed); Close on a nil *sql.DB would panic.
	if p.dbConn != nil {
		if err := p.dbConn.Close(); err != nil {
			return err
		}
		p.dbConn = nil
	}

	if !p.skipSQLCmd {
		if err := p.dropTestDB(); err != nil {
			return err
		}
	}

	return os.Remove(p.pgPassFile)
}
// conn lazily opens (and caches) the connection to the test database and
// runs the goose "up" migrations against it. goose.ErrNoNextVersion means
// the database is already fully migrated and is not treated as a failure.
func (p *pgTester) conn() (*sql.DB, error) {
	if p.dbConn != nil {
		return p.dbConn, nil
	}

	var err error
	p.dbConn, err = sql.Open("postgres", driver.PSQLBuildQueryString(p.user, p.pass, p.testDBName, p.host, p.port, p.sslmode))
	if err != nil {
		return nil, err
	}

	// Migrations live three directories up from this generated package.
	path := filepath.Join("..", "..", "migrations")
	err = goose.Run("up", p.dbConn, "postgres", path, "")
	if err != nil {
		if err == goose.ErrNoNextVersion {
			return p.dbConn, nil
		}
		return nil, err
	}

	return p.dbConn, nil
}

View File

@@ -0,0 +1,10 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import "testing"
// TestUpsert runs the generated upsert test for every model in this
// package as a named subtest.
func TestUpsert(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsUpsert)
}

View File

@@ -0,0 +1,61 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"fmt"
"strings"
"github.com/thrasher-corp/sqlboiler/drivers"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
// buildUpsertQueryPostgres builds a SQL statement string using the upsertData provided.
// It emits "INSERT ... ON CONFLICT (...) DO UPDATE SET ..." (or DO NOTHING)
// with an optional RETURNING clause.
func buildUpsertQueryPostgres(dia drivers.Dialect, tableName string, updateOnConflict bool, ret, update, conflict, whitelist []string) string {
	// Quote every identifier list with the dialect's quote runes.
	conflict = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, conflict)
	whitelist = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, whitelist)
	ret = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, ret)

	buf := strmangle.GetBuffer()
	defer strmangle.PutBuffer(buf)

	// An empty column whitelist requires the DEFAULT VALUES form rather
	// than an explicit column/placeholder list.
	columns := "DEFAULT VALUES"
	if len(whitelist) != 0 {
		columns = fmt.Sprintf("(%s) VALUES (%s)",
			strings.Join(whitelist, ", "),
			strmangle.Placeholders(dia.UseIndexPlaceholders, len(whitelist), 1, 1))
	}

	fmt.Fprintf(
		buf,
		"INSERT INTO %s %s ON CONFLICT ",
		tableName,
		columns,
	)

	if !updateOnConflict || len(update) == 0 {
		buf.WriteString("DO NOTHING")
	} else {
		// ON CONFLICT (cols...) DO UPDATE SET col = EXCLUDED.col, ...
		buf.WriteByte('(')
		buf.WriteString(strings.Join(conflict, ", "))
		buf.WriteString(") DO UPDATE SET ")

		for i, v := range update {
			if i != 0 {
				buf.WriteByte(',')
			}
			quoted := strmangle.IdentQuote(dia.LQ, dia.RQ, v)
			buf.WriteString(quoted)
			buf.WriteString(" = EXCLUDED.")
			buf.WriteString(quoted)
		}
	}

	if len(ret) != 0 {
		buf.WriteString(" RETURNING ")
		buf.WriteString(strings.Join(ret, ", "))
	}

	return buf.String()
}

View File

@@ -0,0 +1,816 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"context"
"database/sql"
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/queries/qm"
"github.com/thrasher-corp/sqlboiler/queries/qmhelper"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
// AuditEvent is an object representing the database table.
// NOTE(review): CreatedAt is mapped as a string rather than time.Time —
// presumably because of SQLite's text timestamp storage; confirm against
// the migration schema.
type AuditEvent struct {
	ID         int64  `boil:"id" json:"id" toml:"id" yaml:"id"`
	Type       string `boil:"type" json:"type" toml:"type" yaml:"type"`
	Identifier string `boil:"identifier" json:"identifier" toml:"identifier" yaml:"identifier"`
	Message    string `boil:"message" json:"message" toml:"message" yaml:"message"`
	CreatedAt  string `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"`

	R *auditEventR `boil:"-" json:"-" toml:"-" yaml:"-"`
	L auditEventL  `boil:"-" json:"-" toml:"-" yaml:"-"`
}

// AuditEventColumns maps struct field names to their database column names.
var AuditEventColumns = struct {
	ID         string
	Type       string
	Identifier string
	Message    string
	CreatedAt  string
}{
	ID:         "id",
	Type:       "type",
	Identifier: "identifier",
	Message:    "message",
	CreatedAt:  "created_at",
}
// Generated where

// whereHelperint64 builds typed WHERE-clause query mods for int64 columns.
type whereHelperint64 struct{ field string }

func (w whereHelperint64) EQ(x int64) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperint64) LT(x int64) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperint64) GT(x int64) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }

// whereHelperstring builds typed WHERE-clause query mods for string columns.
type whereHelperstring struct{ field string }

func (w whereHelperstring) EQ(x string) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperstring) LT(x string) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperstring) GT(x string) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }

// IN builds an "IN (...)" query mod from the given slice of values.
func (w whereHelperstring) IN(slice []string) qm.QueryMod {
	values := make([]interface{}, 0, len(slice))
	for _, value := range slice {
		values = append(values, value)
	}
	return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
}

// AuditEventWhere exposes a typed where helper for each audit_event column.
var AuditEventWhere = struct {
	ID         whereHelperint64
	Type       whereHelperstring
	Identifier whereHelperstring
	Message    whereHelperstring
	CreatedAt  whereHelperstring
}{
	ID:         whereHelperint64{field: "\"audit_event\".\"id\""},
	Type:       whereHelperstring{field: "\"audit_event\".\"type\""},
	Identifier: whereHelperstring{field: "\"audit_event\".\"identifier\""},
	Message:    whereHelperstring{field: "\"audit_event\".\"message\""},
	CreatedAt:  whereHelperstring{field: "\"audit_event\".\"created_at\""},
}
// AuditEventRels is where relationship names are stored.
// (audit_event has no relationships, so this is empty.)
var AuditEventRels = struct {
}{}

// auditEventR is where relationships are stored.
type auditEventR struct {
}

// NewStruct creates a new relationship struct
func (*auditEventR) NewStruct() *auditEventR {
	return &auditEventR{}
}

// auditEventL is where Load methods for each relationship are stored.
type auditEventL struct{}

// Column lists used to derive INSERT/UPDATE column sets.
var (
	auditEventAllColumns            = []string{"id", "type", "identifier", "message", "created_at"}
	auditEventColumnsWithoutDefault = []string{"type", "identifier", "message"}
	auditEventColumnsWithDefault    = []string{"id", "created_at"}
	auditEventPrimaryKeyColumns     = []string{"id"}
)

type (
	// AuditEventSlice is an alias for a slice of pointers to AuditEvent.
	// This should generally be used opposed to []AuditEvent.
	AuditEventSlice []*AuditEvent
	// AuditEventHook is the signature for custom AuditEvent hook methods
	AuditEventHook func(context.Context, boil.ContextExecutor, *AuditEvent) error

	auditEventQuery struct {
		*queries.Query
	}
)

// Cache for insert, update and upsert
var (
	auditEventType                 = reflect.TypeOf(&AuditEvent{})
	auditEventMapping              = queries.MakeStructMapping(auditEventType)
	auditEventPrimaryKeyMapping, _ = queries.BindMapping(auditEventType, auditEventMapping, auditEventPrimaryKeyColumns)
	auditEventInsertCacheMut       sync.RWMutex
	auditEventInsertCache          = make(map[string]insertCache)
	auditEventUpdateCacheMut       sync.RWMutex
	auditEventUpdateCache          = make(map[string]updateCache)
	auditEventUpsertCacheMut       sync.RWMutex
	auditEventUpsertCache          = make(map[string]insertCache)
)

var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)

// Registered hook functions, executed in registration order.
var auditEventBeforeInsertHooks []AuditEventHook
var auditEventBeforeUpdateHooks []AuditEventHook
var auditEventBeforeDeleteHooks []AuditEventHook
var auditEventBeforeUpsertHooks []AuditEventHook
var auditEventAfterInsertHooks []AuditEventHook
var auditEventAfterSelectHooks []AuditEventHook
var auditEventAfterUpdateHooks []AuditEventHook
var auditEventAfterDeleteHooks []AuditEventHook
var auditEventAfterUpsertHooks []AuditEventHook
// doBeforeInsertHooks executes all "before insert" hooks.
func (o *AuditEvent) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	// Hooks can be disabled globally through the context.
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventBeforeInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpdateHooks executes all "before update" hooks.
func (o *AuditEvent) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventBeforeUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeDeleteHooks executes all "before delete" hooks.
func (o *AuditEvent) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventBeforeDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpsertHooks executes all "before upsert" hooks.
func (o *AuditEvent) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventBeforeUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterInsertHooks executes all "after insert" hooks.
func (o *AuditEvent) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventAfterInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterSelectHooks executes all "after select" hooks.
func (o *AuditEvent) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventAfterSelectHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpdateHooks executes all "after update" hooks.
func (o *AuditEvent) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventAfterUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterDeleteHooks executes all "after delete" hooks.
func (o *AuditEvent) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventAfterDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpsertHooks executes all "after upsert" hooks.
func (o *AuditEvent) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range auditEventAfterUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// AddAuditEventHook registers your hook function for all future operations.
func AddAuditEventHook(hookPoint boil.HookPoint, auditEventHook AuditEventHook) {
	switch hookPoint {
	case boil.BeforeInsertHook:
		auditEventBeforeInsertHooks = append(auditEventBeforeInsertHooks, auditEventHook)
	case boil.BeforeUpdateHook:
		auditEventBeforeUpdateHooks = append(auditEventBeforeUpdateHooks, auditEventHook)
	case boil.BeforeDeleteHook:
		auditEventBeforeDeleteHooks = append(auditEventBeforeDeleteHooks, auditEventHook)
	case boil.BeforeUpsertHook:
		auditEventBeforeUpsertHooks = append(auditEventBeforeUpsertHooks, auditEventHook)
	case boil.AfterInsertHook:
		auditEventAfterInsertHooks = append(auditEventAfterInsertHooks, auditEventHook)
	case boil.AfterSelectHook:
		auditEventAfterSelectHooks = append(auditEventAfterSelectHooks, auditEventHook)
	case boil.AfterUpdateHook:
		auditEventAfterUpdateHooks = append(auditEventAfterUpdateHooks, auditEventHook)
	case boil.AfterDeleteHook:
		auditEventAfterDeleteHooks = append(auditEventAfterDeleteHooks, auditEventHook)
	case boil.AfterUpsertHook:
		auditEventAfterUpsertHooks = append(auditEventAfterUpsertHooks, auditEventHook)
	}
}
// One returns a single auditEvent record from the query.
func (q auditEventQuery) One(ctx context.Context, exec boil.ContextExecutor) (*AuditEvent, error) {
	o := &AuditEvent{}

	queries.SetLimit(q.Query, 1)

	err := q.Bind(ctx, exec, o)
	if err != nil {
		// Surface sql.ErrNoRows unwrapped so callers can compare against it.
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "sqlite3: failed to execute a one query for audit_event")
	}

	if err := o.doAfterSelectHooks(ctx, exec); err != nil {
		return o, err
	}

	return o, nil
}

// All returns all AuditEvent records from the query.
func (q auditEventQuery) All(ctx context.Context, exec boil.ContextExecutor) (AuditEventSlice, error) {
	var o []*AuditEvent

	err := q.Bind(ctx, exec, &o)
	if err != nil {
		return nil, errors.Wrap(err, "sqlite3: failed to assign all query results to AuditEvent slice")
	}

	// Only walk the result set when at least one after-select hook exists.
	if len(auditEventAfterSelectHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
				return o, err
			}
		}
	}

	return o, nil
}
// Count returns the count of all AuditEvent records in the query.
func (q auditEventQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	var count int64

	// Replace any select list with COUNT(*).
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to count audit_event rows")
	}

	return count, nil
}

// Exists checks if the row exists in the table.
func (q auditEventQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	var count int64

	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	// LIMIT 1 lets the database stop at the first match.
	queries.SetLimit(q.Query, 1)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return false, errors.Wrap(err, "sqlite3: failed to check if audit_event exists")
	}

	return count > 0, nil
}

// AuditEvents retrieves all the records using an executor.
func AuditEvents(mods ...qm.QueryMod) auditEventQuery {
	mods = append(mods, qm.From("\"audit_event\""))
	return auditEventQuery{NewQuery(mods...)}
}
// FindAuditEvent retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindAuditEvent(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*AuditEvent, error) {
	auditEventObj := &AuditEvent{}

	sel := "*"
	if len(selectCols) > 0 {
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"audit_event\" where \"id\"=?", sel,
	)

	q := queries.Raw(query, iD)

	err := q.Bind(ctx, exec, auditEventObj)
	if err != nil {
		// Surface sql.ErrNoRows unwrapped so callers can compare against it.
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "sqlite3: unable to select from audit_event")
	}

	return auditEventObj, nil
}
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *AuditEvent) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("sqlite3: no audit_event provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// The built statement is cached per unique column set so repeated
	// inserts skip the reflection and query-building work.
	nzDefaults := queries.NonZeroDefaultSet(auditEventColumnsWithDefault, o)

	key := makeCacheKey(columns, nzDefaults)
	auditEventInsertCacheMut.RLock()
	cache, cached := auditEventInsertCache[key]
	auditEventInsertCacheMut.RUnlock()

	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			auditEventAllColumns,
			auditEventColumnsWithDefault,
			auditEventColumnsWithoutDefault,
			nzDefaults,
		)

		cache.valueMapping, err = queries.BindMapping(auditEventType, auditEventMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(auditEventType, auditEventMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"audit_event\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"audit_event\" () VALUES ()%s%s"
		}

		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			// SQLite lacks RETURNING; default values are fetched with a
			// follow-up SELECT keyed on the primary key.
			cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"audit_event\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, auditEventPrimaryKeyColumns))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.query)
		fmt.Fprintln(boil.DebugWriter, vals)
	}

	result, err := exec.ExecContext(ctx, cache.query, vals...)
	if err != nil {
		return errors.Wrap(err, "sqlite3: unable to insert into audit_event")
	}

	var lastID int64
	var identifierCols []interface{}

	// Nothing to refetch when no columns carry database defaults.
	if len(cache.retMapping) == 0 {
		goto CacheNoHooks
	}

	lastID, err = result.LastInsertId()
	if err != nil {
		return ErrSyncFail
	}

	o.ID = int64(lastID)
	// If the auto-increment ID is the only returned column we already
	// have it, so the refetch SELECT can be skipped.
	if lastID != 0 && len(cache.retMapping) == 1 && cache.retMapping[0] == auditEventMapping["ID"] {
		goto CacheNoHooks
	}

	identifierCols = []interface{}{
		o.ID,
	}

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.retQuery)
		fmt.Fprintln(boil.DebugWriter, identifierCols...)
	}

	err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	if err != nil {
		return errors.Wrap(err, "sqlite3: unable to populate default values for audit_event")
	}

CacheNoHooks:
	if !cached {
		auditEventInsertCacheMut.Lock()
		auditEventInsertCache[key] = cache
		auditEventInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the AuditEvent.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *AuditEvent) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}

	// Statement text and value mapping are cached per column set.
	key := makeCacheKey(columns, nil)
	auditEventUpdateCacheMut.RLock()
	cache, cached := auditEventUpdateCache[key]
	auditEventUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			auditEventAllColumns,
			auditEventPrimaryKeyColumns,
		)
		if len(wl) == 0 {
			return 0, errors.New("sqlite3: unable to update audit_event, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"audit_event\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, auditEventPrimaryKeyColumns),
		)
		// Primary key values are appended after the SET values for the
		// WHERE clause placeholders.
		cache.valueMapping, err = queries.BindMapping(auditEventType, auditEventMapping, append(wl, auditEventPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.query)
		fmt.Fprintln(boil.DebugWriter, values)
	}

	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to update audit_event row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by update for audit_event")
	}

	if !cached {
		auditEventUpdateCacheMut.Lock()
		auditEventUpdateCache[key] = cache
		auditEventUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
func (q auditEventQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to update all for audit_event")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to retrieve rows affected for audit_event")
	}

	return rowsAff, nil
}

// UpdateAll updates all rows with the specified column values, using an executor.
func (o AuditEventSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("sqlite3: update all requires at least one column argument")
	}

	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), auditEventPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"audit_event\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, auditEventPrimaryKeyColumns, len(o)))

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		fmt.Fprintln(boil.DebugWriter, args...)
	}

	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to update all in auditEvent slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to retrieve rows affected all in update all auditEvent")
	}
	return rowsAff, nil
}
// Delete deletes a single AuditEvent record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *AuditEvent) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("sqlite3: no AuditEvent provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), auditEventPrimaryKeyMapping)
	sql := "DELETE FROM \"audit_event\" WHERE \"id\"=?"

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		fmt.Fprintln(boil.DebugWriter, args...)
	}

	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to delete from audit_event")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by delete for audit_event")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}

// DeleteAll deletes all matching rows.
func (q auditEventQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("sqlite3: no auditEventQuery provided for delete all")
	}

	// Convert the SELECT query into a DELETE.
	queries.SetDelete(q.Query)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to delete all from audit_event")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by deleteall for audit_event")
	}

	return rowsAff, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
func (o AuditEventSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(auditEventBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Collect every row's primary key for the repeated WHERE clause.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), auditEventPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"audit_event\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, auditEventPrimaryKeyColumns, len(o))

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		fmt.Fprintln(boil.DebugWriter, args)
	}

	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to delete all from auditEvent slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by deleteall for audit_event")
	}

	if len(auditEventAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *AuditEvent) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	ret, err := FindAuditEvent(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	// Overwrite the receiver in place with the fresh row.
	*o = *ret
	return nil
}

// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *AuditEventSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := AuditEventSlice{}
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), auditEventPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"audit_event\".* FROM \"audit_event\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, auditEventPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "sqlite3: unable to reload all in AuditEventSlice")
	}

	*o = slice

	return nil
}
// AuditEventExists checks if the AuditEvent row exists.
func AuditEventExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
	var exists bool
	// EXISTS + LIMIT 1 lets the database stop at the first match.
	sql := "select exists(select 1 from \"audit_event\" where \"id\"=? limit 1)"

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		fmt.Fprintln(boil.DebugWriter, iD)
	}

	row := exec.QueryRowContext(ctx, sql, iD)

	err := row.Scan(&exists)
	if err != nil {
		return false, errors.Wrap(err, "sqlite3: unable to check if audit_event exists")
	}

	return exists, nil
}

View File

@@ -0,0 +1,684 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
	// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
	// so force a package dependency in case they don't.
	_ = queries.Equal
)

// testAuditEvents verifies the query factory returns a usable query object.
func testAuditEvents(t *testing.T) {
	t.Parallel()
	query := AuditEvents()

	if query.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
// testAuditEventsDelete checks that deleting a single inserted row via
// AuditEvent.Delete affects exactly one record and leaves the table empty.
func testAuditEventsDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := o.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testAuditEventsQueryDeleteAll checks that query-level DeleteAll removes
// the inserted row and reports one affected row.
func testAuditEventsQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := AuditEvents().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testAuditEventsSliceDeleteAll checks that slice-level DeleteAll removes
// all rows held in the slice and reports one affected row.
func testAuditEventsSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := AuditEventSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testAuditEventsExists checks AuditEventExists reports true for an
// inserted row's primary key.
func testAuditEventsExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := AuditEventExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if AuditEvent exists: %s", err)
	}
	if !e {
		t.Errorf("Expected AuditEventExists to return true, but got false.")
	}
}
// testAuditEventsFind checks FindAuditEvent returns a record for an
// inserted row's primary key.
func testAuditEventsFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	auditEventFound, err := FindAuditEvent(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if auditEventFound == nil {
		t.Error("want a record, got nil")
	}
}
// testAuditEventsBind checks a query result can be bound back onto a
// single AuditEvent struct.
func testAuditEventsBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = AuditEvents().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
// testAuditEventsOne checks One returns a non-nil record after an insert.
func testAuditEventsOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := AuditEvents().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
// testAuditEventsAll checks All returns every inserted record.
func testAuditEventsAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	auditEventOne := &AuditEvent{}
	auditEventTwo := &AuditEvent{}
	if err = randomize.Struct(seed, auditEventOne, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}
	if err = randomize.Struct(seed, auditEventTwo, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = auditEventOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = auditEventTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := AuditEvents().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
// testAuditEventsCount checks Count reflects the number of inserted rows.
func testAuditEventsCount(t *testing.T) {
	t.Parallel()

	var err error
	seed := randomize.NewSeed()
	auditEventOne := &AuditEvent{}
	auditEventTwo := &AuditEvent{}
	if err = randomize.Struct(seed, auditEventOne, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}
	if err = randomize.Struct(seed, auditEventTwo, auditEventDBTypes, false, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = auditEventOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = auditEventTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// The hook functions below are registered by testAuditEventsHooks. Each one
// resets the passed AuditEvent to its zero value so the test can verify,
// via reflect.DeepEqual against an empty struct, that the hook actually ran.

// auditEventBeforeInsertHook zeroes the event to mark that it executed.
func auditEventBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventAfterInsertHook zeroes the event to mark that it executed.
func auditEventAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventAfterSelectHook zeroes the event to mark that it executed.
func auditEventAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventBeforeUpdateHook zeroes the event to mark that it executed.
func auditEventBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventAfterUpdateHook zeroes the event to mark that it executed.
func auditEventAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventBeforeDeleteHook zeroes the event to mark that it executed.
func auditEventBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventAfterDeleteHook zeroes the event to mark that it executed.
func auditEventAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventBeforeUpsertHook zeroes the event to mark that it executed.
func auditEventBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}

// auditEventAfterUpsertHook zeroes the event to mark that it executed.
func auditEventAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *AuditEvent) error {
	*o = AuditEvent{}
	return nil
}
// testAuditEventsHooks registers each lifecycle hook in turn, fires it via
// the corresponding do*Hooks method with a nil executor, and checks the hook
// ran by verifying it reset the object to the zero value. Each hook registry
// slice is cleared afterwards so later registrations start from a clean
// slate. The same register/fire/verify/clear pattern repeats for every hook
// point.
func testAuditEventsHooks(t *testing.T) {
	t.Parallel()
	var err error
	ctx := context.Background()
	// empty is the zero-value reference each hook is expected to produce.
	empty := &AuditEvent{}
	o := &AuditEvent{}
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, auditEventDBTypes, false); err != nil {
		t.Errorf("Unable to randomize AuditEvent object: %s", err)
	}
	AddAuditEventHook(boil.BeforeInsertHook, auditEventBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeInsertHooks = []AuditEventHook{}
	AddAuditEventHook(boil.AfterInsertHook, auditEventAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	auditEventAfterInsertHooks = []AuditEventHook{}
	AddAuditEventHook(boil.AfterSelectHook, auditEventAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	auditEventAfterSelectHooks = []AuditEventHook{}
	AddAuditEventHook(boil.BeforeUpdateHook, auditEventBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeUpdateHooks = []AuditEventHook{}
	AddAuditEventHook(boil.AfterUpdateHook, auditEventAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	auditEventAfterUpdateHooks = []AuditEventHook{}
	AddAuditEventHook(boil.BeforeDeleteHook, auditEventBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeDeleteHooks = []AuditEventHook{}
	AddAuditEventHook(boil.AfterDeleteHook, auditEventAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	auditEventAfterDeleteHooks = []AuditEventHook{}
	AddAuditEventHook(boil.BeforeUpsertHook, auditEventBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	auditEventBeforeUpsertHooks = []AuditEventHook{}
	AddAuditEventHook(boil.AfterUpsertHook, auditEventAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	auditEventAfterUpsertHooks = []AuditEventHook{}
}
// testAuditEventsInsert checks that inserting with inferred columns stores
// exactly one row.
func testAuditEventsInsert(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	event := &AuditEvent{}
	if err := randomize.Struct(seed, event, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := event.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	rows, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if rows != 1 {
		t.Error("want one record, got:", rows)
	}
}
// testAuditEventsInsertWhitelist checks that inserting with an explicit
// column whitelist (every non-defaulted column) stores exactly one row.
func testAuditEventsInsertWhitelist(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	event := &AuditEvent{}
	if err := randomize.Struct(seed, event, auditEventDBTypes, true); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := event.Insert(ctx, tx, boil.Whitelist(auditEventColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}

	rows, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if rows != 1 {
		t.Error("want one record, got:", rows)
	}
}
// testAuditEventsReload verifies a freshly inserted row can be reloaded
// from the database without error.
func testAuditEventsReload(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	event := &AuditEvent{}
	if err := randomize.Struct(seed, event, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := event.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err := event.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testAuditEventsReloadAll verifies ReloadAll succeeds on a slice holding a
// freshly inserted row.
func testAuditEventsReloadAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	event := &AuditEvent{}
	if err := randomize.Struct(seed, event, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := event.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	events := AuditEventSlice{event}
	if err := events.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testAuditEventsSelect inserts one randomized row and verifies a full
// select returns exactly that row.
func testAuditEventsSelect(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	event := &AuditEvent{}
	if err := randomize.Struct(seed, event, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := event.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	records, err := AuditEvents().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(records) != 1 {
		t.Error("want one record, got:", len(records))
	}
}
var (
	// auditEventDBTypes maps AuditEvent struct field names to their SQLite
	// column types; consumed by randomize.Struct to build test fixtures.
	auditEventDBTypes = map[string]string{`ID`: `INTEGER`, `Type`: `TEXT`, `Identifier`: `TEXT`, `Message`: `TEXT`, `CreatedAt`: `TIMESTAMP`}
	// Reference bytes so the generated import is retained even when unused.
	_ = bytes.MinRead
)
// testAuditEventsUpdate inserts a randomized row, re-randomizes every
// non-primary-key column and verifies Update affects exactly one row.
func testAuditEventsUpdate(t *testing.T) {
	t.Parallel()

	// Fixed Yoda condition (`0 == len(...)`) to the idiomatic form.
	if len(auditEventPrimaryKeyColumns) == 0 {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(auditEventAllColumns) == len(auditEventPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Blacklist the primary key columns so the randomizer leaves them intact
	// and Update targets the row that was just inserted.
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}

	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testAuditEventsSliceUpdateAll inserts one randomized row, re-randomizes
// its non-primary-key columns, builds an update map by reflecting over the
// struct's `boil` tags, and verifies UpdateAll touches exactly one row.
func testAuditEventsSliceUpdateAll(t *testing.T) {
	t.Parallel()
	if len(auditEventAllColumns) == len(auditEventPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &AuditEvent{}
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := AuditEvents().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Keep the primary key stable so UpdateAll addresses the inserted row.
	if err = randomize.Struct(seed, o, auditEventDBTypes, true, auditEventPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize AuditEvent struct: %s", err)
	}
	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(auditEventAllColumns, auditEventPrimaryKeyColumns) {
		fields = auditEventAllColumns
	} else {
		fields = strmangle.SetComplement(
			auditEventAllColumns,
			auditEventPrimaryKeyColumns,
		)
	}
	// Map each updatable column name to the randomized struct value by
	// matching the field whose `boil` tag equals the column name.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()
	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}
	slice := AuditEventSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}

View File

@@ -0,0 +1,119 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"database/sql"
"flag"
"fmt"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/spf13/viper"
"github.com/thrasher-corp/sqlboiler/boil"
)
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")
const outputDirDepth = 3
var (
dbMain tester
)
// tester abstracts the per-driver test database lifecycle so TestMain can
// run the generated suite against any backend registered via dbMain.
type tester interface {
	setup() error           // prepare the test database
	conn() (*sql.DB, error) // open (and memoize) a connection to it
	teardown() error        // close the connection and remove the database
}
// TestMain loads configuration, prepares the test database through the
// registered tester, wires the global sqlboiler connection and runs the
// generated suite, tearing the database down afterwards.
func TestMain(m *testing.M) {
	if dbMain == nil {
		fmt.Println("no dbMain tester interface was ready")
		os.Exit(-1)
	}
	rand.Seed(time.Now().UnixNano())
	flag.Parse()

	// Load configuration
	if err := initViper(); err != nil {
		fmt.Println("unable to load config file")
		os.Exit(-2)
	}

	// Set DebugMode so we can see generated sql statements
	boil.DebugMode = *flagDebugMode

	if err := dbMain.setup(); err != nil {
		fmt.Println("Unable to execute setup:", err)
		os.Exit(-4)
	}

	conn, err := dbMain.conn()
	if err != nil {
		// Fix: previously this only logged the error and carried on, handing
		// a nil *sql.DB to boil.SetDB and running the whole suite against it.
		fmt.Println("failed to get connection:", err)
		os.Exit(-3)
	}
	boil.SetDB(conn)

	code := m.Run()

	if err = dbMain.teardown(); err != nil {
		fmt.Println("Unable to execute teardown:", err)
		os.Exit(-5)
	}
	os.Exit(code)
}
// initViper loads the sqlboiler configuration: either the file passed via
// -test.config, or the conventional search locations (three directories up
// from the generated package, then $XDG_CONFIG_HOME/sqlboiler or
// $HOME/.config/sqlboiler), finally enabling env-var overrides.
func initViper() error {
	if flagConfigFile != nil && *flagConfigFile != "" {
		viper.SetConfigFile(*flagConfigFile)
		return viper.ReadInConfig()
	}

	viper.SetConfigName("sqlboiler")

	configHome := os.Getenv("XDG_CONFIG_HOME")
	homePath := os.Getenv("HOME")
	// Removed the redundant top-level `var err error`; Getwd declares it.
	wd, err := os.Getwd()
	if err != nil {
		wd = strings.Repeat("../", outputDirDepth)
	} else {
		wd = wd + strings.Repeat("/..", outputDirDepth)
	}

	configPaths := []string{wd}
	if len(configHome) > 0 {
		configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
	} else {
		configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
	}
	for _, p := range configPaths {
		viper.AddConfigPath(p)
	}

	// Ignore errors here, fall back to defaults and validation to provide errs
	_ = viper.ReadInConfig()
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	return nil
}

View File

@@ -0,0 +1,33 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"github.com/thrasher-corp/sqlboiler/drivers"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/queries/qm"
)
// dialect describes SQLite's SQL flavour for the sqlboiler query builder:
// double-quote identifier quoting, positional placeholders, LastInsertId
// based id retrieval, and none of the Postgres/MSSQL-specific clauses.
var dialect = drivers.Dialect{
	LQ: 0x22, // '"' — left identifier quote
	RQ: 0x22, // '"' — right identifier quote

	UseIndexPlaceholders:    false,
	UseLastInsertID:         true,
	UseSchema:               false,
	UseDefaultKeyword:       false,
	UseAutoColumns:          false,
	UseTopClause:            false,
	UseOutputClause:         false,
	UseCaseWhenExistsClause: false,
}

// NewQuery initializes a new Query using the passed in QueryMods
func NewQuery(mods ...qm.QueryMod) *queries.Query {
	q := &queries.Query{}
	queries.SetDialect(q, &dialect)
	qm.Apply(q, mods...)
	return q
}

View File

@@ -0,0 +1,52 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"regexp"
"github.com/thrasher-corp/sqlboiler/boil"
)
// dbNameRand is a source for random test-database names.
// NOTE(review): not referenced anywhere in this file — presumably used by
// other generated drivers; confirm before removing.
var dbNameRand *rand.Rand

// MustTx panics if the transaction could not be started; used by the
// generated tests where a failed BeginTx is unrecoverable.
func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor {
	if err != nil {
		panic(fmt.Sprintf("Cannot create a transactor: %s", err))
	}
	return transactor
}
func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader {
return &fKeyDestroyer{
reader: reader,
rgx: regex,
}
}
type fKeyDestroyer struct {
reader io.Reader
buf *bytes.Buffer
rgx *regexp.Regexp
}
func (f *fKeyDestroyer) Read(b []byte) (int, error) {
if f.buf == nil {
all, err := ioutil.ReadAll(f.reader)
if err != nil {
return 0, err
}
all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1)
all = f.rgx.ReplaceAll(all, []byte{})
f.buf = bytes.NewBuffer(all)
}
return f.buf.Read(b)
}

View File

@@ -0,0 +1,121 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import "testing"
// This test suite runs each operation test in parallel.
// Example, if your database has 3 tables, the suite will run:
// table1, table2 and table3 Delete in parallel
// table1, table2 and table3 Insert in parallel, and so forth.
// It does NOT run each operation group in parallel.
// Separating the tests thusly grants avoidance of Postgres deadlocks.
// TestParent runs the top-level CRUD smoke test, one subtest per table.
func TestParent(t *testing.T) {
	t.Run("AuditEvents", testAuditEvents)
}

// TestDelete exercises single-row deletion per table.
func TestDelete(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsDelete)
}

// TestQueryDeleteAll exercises query-based bulk deletion per table.
func TestQueryDeleteAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsQueryDeleteAll)
}

// TestSliceDeleteAll exercises slice-based bulk deletion per table.
func TestSliceDeleteAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsSliceDeleteAll)
}

// TestExists exercises the Exists helper per table.
func TestExists(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsExists)
}

// TestFind exercises primary-key lookup per table.
func TestFind(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsFind)
}

// TestBind exercises binding query results into structs per table.
func TestBind(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsBind)
}

// TestOne exercises single-row retrieval per table.
func TestOne(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsOne)
}

// TestAll exercises full-table retrieval per table.
func TestAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsAll)
}

// TestCount exercises row counting per table.
func TestCount(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsCount)
}

// TestHooks exercises the lifecycle hook machinery per table.
func TestHooks(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsHooks)
}

// TestInsert exercises both inferred-column and whitelist inserts per table.
func TestInsert(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsInsert)
	t.Run("AuditEvents", testAuditEventsInsertWhitelist)
}
// TestToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestToOne(t *testing.T) {}
// TestOneToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOne(t *testing.T) {}
// TestToMany tests cannot be run in parallel
// or deadlocks can occur.
func TestToMany(t *testing.T) {}
// TestToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneSet(t *testing.T) {}
// TestToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneRemove(t *testing.T) {}
// TestOneToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneSet(t *testing.T) {}
// TestOneToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneRemove(t *testing.T) {}
// TestToManyAdd tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyAdd(t *testing.T) {}
// TestToManySet tests cannot be run in parallel
// or deadlocks can occur.
func TestToManySet(t *testing.T) {}
// TestToManyRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyRemove(t *testing.T) {}
// TestReload exercises single-row reloads per table.
func TestReload(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsReload)
}

// TestReloadAll exercises slice reloads per table.
func TestReloadAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsReloadAll)
}

// TestSelect exercises select queries per table.
func TestSelect(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsSelect)
}

// TestUpdate exercises single-row updates per table.
func TestUpdate(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsUpdate)
}

// TestSliceUpdateAll exercises bulk updates per table.
func TestSliceUpdateAll(t *testing.T) {
	t.Run("AuditEvents", testAuditEventsSliceUpdateAll)
}

View File

@@ -0,0 +1,10 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
// TableNames exposes the database table names as struct fields so queries
// can reference them without scattering string literals.
var TableNames = struct {
	AuditEvent string
}{
	AuditEvent: "audit_event",
}

View File

@@ -0,0 +1,52 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"strconv"
"github.com/pkg/errors"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
// M type is for providing columns and column values to UpdateAll.
type M map[string]interface{}
// ErrSyncFail occurs during insert when the record could not be retrieved in
// order to populate default value information. This usually happens when LastInsertId
// fails or there was a primary key configuration that was not resolvable.
var ErrSyncFail = errors.New("sqlite3: failed to synchronize data after insert")
// insertCache memoizes the generated INSERT statement, the follow-up query
// used to fetch defaulted columns, and the struct-field index mappings for
// a given column set.
type insertCache struct {
	query        string
	retQuery     string
	valueMapping []uint64
	retMapping   []uint64
}

// updateCache memoizes the generated UPDATE statement and its struct-field
// index mapping for a given column set.
type updateCache struct {
	query        string
	valueMapping []uint64
}
// makeCacheKey builds a deterministic statement-cache key from the column
// kind, the column names and any non-zero default columns, using a pooled
// buffer to avoid allocations.
func makeCacheKey(cols boil.Columns, nzDefaults []string) string {
	buf := strmangle.GetBuffer()
	defer strmangle.PutBuffer(buf)

	buf.WriteString(strconv.Itoa(cols.Kind))
	for _, name := range cols.Cols {
		buf.WriteString(name)
	}
	if len(nzDefaults) != 0 {
		buf.WriteByte('.')
	}
	for _, name := range nzDefaults {
		buf.WriteString(name)
	}
	return buf.String()
}

View File

@@ -0,0 +1,63 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"database/sql"
"fmt"
"math/rand"
"os"
"path/filepath"
"regexp"
_ "github.com/mattn/go-sqlite3"
"github.com/thrasher-corp/goose"
)
var rgxSQLitekey = regexp.MustCompile(`(?mi)((,\n)?\s+foreign key.*?\n)+`)
// sqliteTester implements the tester interface against a throwaway SQLite
// database file created under the OS temp directory.
type sqliteTester struct {
	dbConn     *sql.DB
	dbName     string
	testDBName string
}
// init registers the SQLite tester as the suite's database backend.
func init() {
	dbMain = &sqliteTester{}
}
// setup picks a unique temp-file path for the test database; the file
// itself is created lazily when conn opens it.
func (s *sqliteTester) setup() error {
	s.testDBName = filepath.Join(os.TempDir(), fmt.Sprintf("boil-sqlite3-%d.sql", rand.Int()))
	return nil
}
// teardown closes the open connection (if any) and deletes the temp
// database file.
func (s *sqliteTester) teardown() error {
	if s.dbConn != nil {
		// Fix: the Close error was silently discarded. Surface it, but still
		// attempt to remove the database file so we don't leak temp files.
		if err := s.dbConn.Close(); err != nil {
			_ = os.Remove(s.testDBName)
			return err
		}
	}
	return os.Remove(s.testDBName)
}
// conn lazily opens the SQLite database (forcing UTC timestamps via _loc)
// and runs all goose migrations so the schema matches the generated models.
// The connection is memoized for subsequent calls.
func (s *sqliteTester) conn() (*sql.DB, error) {
	if s.dbConn != nil {
		return s.dbConn, nil
	}
	var err error
	s.dbConn, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?_loc=UTC", s.testDBName))
	if err != nil {
		return nil, err
	}
	// Migrations live two levels up from the generated models package.
	path := filepath.Join("..", "..", "migrations")
	err = goose.Run("up", s.dbConn, "sqlite3", path, "")
	if err != nil {
		return nil, err
	}
	return s.dbConn, nil
}

View File

@@ -1,62 +1,93 @@
package audit
import (
"sync"
"context"
"errors"
"time"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/models"
modelPSQL "github.com/thrasher-corp/gocryptotrader/database/models/postgres"
modelSQLite "github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/repository"
log "github.com/thrasher-corp/gocryptotrader/logger"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries/qm"
)
// Repository that is required for each driver type to implement
type Repository interface {
AddEventTx(event []*models.AuditEvent)
}
// TableTimeFormat Go Time format conversion
const TableTimeFormat = "2006-01-02 15:04:05"
var (
// Audit repository initialise copy of Audit Repository
Audit Repository
)
type eventPool struct {
events []*models.AuditEvent
eventMu sync.Mutex
}
var ep eventPool
// Event allows you to call audit.Event() as long as the audit repository package without the need to include each driver
func Event(msgType, identifier, message string) {
if database.Conn.SQL == nil {
// Event inserts a new audit event to database
func Event(id, msgtype, message string) {
if database.DB.SQL == nil {
return
}
if Audit == nil {
ctx := context.Background()
ctx = boil.SkipTimestamps(ctx)
tx, err := database.DB.SQL.BeginTx(ctx, nil)
if err != nil {
log.Errorf(log.Global, "Event transaction begin failed: %v", err)
return
}
tempEvent := models.AuditEvent{
Type: msgType,
Identifier: identifier,
Message: message}
if repository.GetSQLDialect() == database.DBSQLite3 {
var tempEvent = modelSQLite.AuditEvent{
Type: msgtype,
Identifier: id,
Message: message,
}
err = tempEvent.Insert(ctx, tx, boil.Blacklist("created_at"))
} else {
var tempEvent = modelPSQL.AuditEvent{
Type: msgtype,
Identifier: id,
Message: message,
}
err = tempEvent.Insert(ctx, tx, boil.Blacklist("created_at"))
}
ep.poolEvents(&tempEvent)
}
func (e *eventPool) poolEvents(event *models.AuditEvent) {
e.eventMu.Lock()
defer e.eventMu.Unlock()
e.events = append(e.events, event)
database.Conn.Mu.RLock()
defer database.Conn.Mu.RUnlock()
if !database.Conn.Connected {
log.Warnln(log.DatabaseMgr, "connection to database interrupted pooling database writes")
if err != nil {
log.Errorf(log.Global, "Event insert failed: %v", err)
err = tx.Rollback()
if err != nil {
log.Errorf(log.Global, "Event Transaction rollback failed: %v", err)
}
return
}
Audit.AddEventTx(e.events)
e.events = nil
err = tx.Commit()
if err != nil {
log.Errorf(log.Global, "Event Transaction commit failed: %v", err)
err = tx.Rollback()
if err != nil {
log.Errorf(log.Global, "Event Transaction rollback failed: %v", err)
}
return
}
}
// GetEvent () returns list of order events matching query
func GetEvent(startTime, endTime time.Time, order string, limit int) (interface{}, error) {
if database.DB.SQL == nil {
return nil, errors.New("database is nil")
}
query := qm.Where("created_at BETWEEN ? AND ?", startTime, endTime)
orderByQueryString := "id"
if order == "desc" {
orderByQueryString += " desc"
}
orderByQuery := qm.OrderBy(orderByQueryString)
limitQuery := qm.Limit(limit)
ctx := context.Background()
if repository.GetSQLDialect() == database.DBSQLite3 {
return modelSQLite.AuditEvents(query, orderByQuery, limitQuery).All(ctx, database.DB.SQL)
}
return modelPSQL.AuditEvents(query, orderByQuery, limitQuery).All(ctx, database.DB.SQL)
}

View File

@@ -1,52 +0,0 @@
package audit
import (
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/models"
"github.com/thrasher-corp/gocryptotrader/database/repository/audit"
log "github.com/thrasher-corp/gocryptotrader/logger"
)
type auditRepo struct{}
// Audit returns a new instance of auditRepo
func Audit() audit.Repository {
return &auditRepo{}
}
// AddEventTx writes multiple events to database
// writes are done using a transaction with a rollback on error
func (pg *auditRepo) AddEventTx(event []*models.AuditEvent) {
if pg == nil {
return
}
tx, err := database.Conn.SQL.Begin()
if err != nil {
log.Errorf(log.Global, "Failed to create transaction: %v\n", err)
return
}
query := `INSERT INTO audit_event (type, identifier, message) VALUES($1, $2, $3)`
for x := range event {
_, err = tx.Exec(query, &event[x].Type, &event[x].Identifier, &event[x].Message)
if err != nil {
err = tx.Rollback()
if err != nil {
log.Errorf(log.Global, "Tx Rollback has failed: %v", err)
}
return
}
}
err = tx.Commit()
if err != nil {
err = tx.Rollback()
if err != nil {
log.Errorf(log.Global, "Tx Rollback has failed: %v", err)
}
return
}
}

View File

@@ -1,53 +0,0 @@
package audit
import (
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/models"
"github.com/thrasher-corp/gocryptotrader/database/repository/audit"
log "github.com/thrasher-corp/gocryptotrader/logger"
)
type auditRepo struct{}
// Audit returns a new instance of auditRepo
func Audit() audit.Repository {
return &auditRepo{}
}
// AddEventTx writes multiple event to database
// writes are done using a transaction with a rollback on error
func (pg *auditRepo) AddEventTx(event []*models.AuditEvent) {
if pg == nil {
return
}
tx, err := database.Conn.SQL.Begin()
if err != nil {
log.Errorf(log.Global, "Failed to create transaction: %v\n", err)
return
}
query := `INSERT INTO audit_event (type, identifier, message) VALUES($1, $2, $3)`
for x := range event {
_, err = tx.Exec(query, &event[x].Type, &event[x].Identifier, &event[x].Message)
if err != nil {
err = tx.Rollback()
if err != nil {
log.Errorf(log.Global, "Tx Rollback has failed: %v", err)
}
return
}
}
err = tx.Commit()
if err != nil {
err = tx.Rollback()
if err != nil {
log.Errorf(log.Global, "Tx Rollback has failed: %v", err)
}
return
}
}

View File

@@ -0,0 +1,16 @@
package repository
import (
"github.com/thrasher-corp/gocryptotrader/database"
)
// GetSQLDialect returns current SQL Dialect based on enabled driver
// GetSQLDialect returns current SQL Dialect based on enabled driver
func GetSQLDialect() string {
	switch database.DB.Config.Driver {
	case "sqlite", "sqlite3":
		return database.DBSQLite3
	case "psql", "postgres", "postgresql":
		return database.DBPostgreSQL
	default:
		return "invalid driver"
	}
}

View File

@@ -2,102 +2,87 @@ package tests
import (
"fmt"
"path"
"path/filepath"
"sync"
"testing"
"time"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
mg "github.com/thrasher-corp/gocryptotrader/database/migration"
"github.com/thrasher-corp/gocryptotrader/database/repository"
"github.com/thrasher-corp/gocryptotrader/database/repository/audit"
auditPSQL "github.com/thrasher-corp/gocryptotrader/database/repository/audit/postgres"
auditSQlite "github.com/thrasher-corp/gocryptotrader/database/repository/audit/sqlite"
"github.com/thrasher-corp/goose"
)
func TestAudit(t *testing.T) {
testCases := []struct {
name string
config database.Config
audit audit.Repository
config *database.Config
runner func(t *testing.T)
closer func(t *testing.T, dbConn *database.Database) error
closer func(t *testing.T, dbConn *database.Db) error
output interface{}
}{
{
"SQLite",
database.Config{
Driver: "sqlite",
ConnectionDetails: drivers.ConnectionDetails{Database: path.Join(tempDir, "./testdb.db")},
"SQLite-Write",
&database.Config{
Driver: database.DBSQLite3,
ConnectionDetails: drivers.ConnectionDetails{Database: "./testdb"},
},
auditSQlite.Audit(),
writeAudit,
closeDatabase,
nil,
},
{
"Postgres",
"SQLite-Read",
&database.Config{
Driver: database.DBSQLite3,
ConnectionDetails: drivers.ConnectionDetails{Database: "./testdb"},
},
readHelper,
closeDatabase,
nil,
},
{
"Postgres-Write",
postgresTestDatabase,
auditPSQL.Audit(),
writeAudit,
nil,
nil,
},
{
"Postgres-Read",
postgresTestDatabase,
readHelper,
nil,
nil,
},
}
for _, tests := range testCases {
test := tests
t.Run(test.name, func(t *testing.T) {
mg.MigrationDir = filepath.Join("../migration", "migrations")
if !checkValidConfig(t, &test.config.ConnectionDetails) {
t.Skip("database not configured skipping test")
}
dbConn, err := connectToDatabase(t, &test.config)
dbConn, err := connectToDatabase(t, test.config)
if err != nil {
t.Fatal(err)
}
mLogger := mg.MLogger{}
migrations := mg.Migrator{
Log: mLogger,
}
migrations.Conn = dbConn
err = migrations.LoadMigrations()
path := filepath.Join("..", "migrations")
err = goose.Run("up", dbConn.SQL, repository.GetSQLDialect(), path, "")
if err != nil {
t.Fatal(err)
}
err = migrations.RunMigration()
if err != nil {
t.Fatal(err)
}
if test.audit != nil {
audit.Audit = test.audit
t.Fatalf("failed to run migrations %v", err)
}
if test.runner != nil {
test.runner(t)
}
switch v := test.output.(type) {
case error:
if v.Error() != test.output.(error).Error() {
t.Fatal(err)
}
return
default:
break
}
if test.closer != nil {
err = test.closer(t, dbConn)
if err != nil {
@@ -112,7 +97,7 @@ func writeAudit(t *testing.T) {
t.Helper()
var wg sync.WaitGroup
for x := 0; x < 200; x++ {
for x := 0; x < 20; x++ {
wg.Add(1)
go func(x int) {
@@ -124,3 +109,12 @@ func writeAudit(t *testing.T) {
wg.Wait()
}
func readHelper(t *testing.T) {
t.Helper()
_, err := audit.GetEvent(time.Now().Add(-time.Hour*60), time.Now(), "asc", 1)
if err != nil {
t.Error(err)
}
}

View File

@@ -4,20 +4,54 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"reflect"
"testing"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
dbpsql "github.com/thrasher-corp/gocryptotrader/database/drivers/postgres"
dbsqlite "github.com/thrasher-corp/gocryptotrader/database/drivers/sqlite"
psqlConn "github.com/thrasher-corp/gocryptotrader/database/drivers/postgres"
sqliteConn "github.com/thrasher-corp/gocryptotrader/database/drivers/sqlite3"
)
var (
tempDir string
tempDir string
postgresTestDatabase *database.Config
)
postgresTestDatabase = database.Config{
func getConnectionDetails() *database.Config {
_, exists := os.LookupEnv("TRAVIS")
if exists {
return &database.Config{
Enabled: true,
Driver: "postgres",
ConnectionDetails: drivers.ConnectionDetails{
Host: "localhost",
Port: 5432,
Username: "postgres",
Password: "",
Database: "gct_dev_ci",
SSLMode: "",
},
}
}
_, exists = os.LookupEnv("APPVEYOR")
if exists {
return &database.Config{
Enabled: true,
Driver: "postgres",
ConnectionDetails: drivers.ConnectionDetails{
Host: "localhost",
Port: 5432,
Username: "postgres",
Password: "Password12!",
Database: "gct_dev_ci",
SSLMode: "",
},
}
}
return &database.Config{
Enabled: true,
Driver: "postgres",
ConnectionDetails: drivers.ConnectionDetails{
@@ -29,10 +63,12 @@ var (
//SSLMode: "",
},
}
)
}
func TestMain(m *testing.M) {
var err error
postgresTestDatabase = getConnectionDetails()
tempDir, err = ioutil.TempDir("", "gct-temp")
if err != nil {
fmt.Printf("failed to create temp file: %v", err)
@@ -52,23 +88,23 @@ func TestMain(m *testing.M) {
func TestDatabaseConnect(t *testing.T) {
testCases := []struct {
name string
config database.Config
closer func(t *testing.T, dbConn *database.Database) error
config *database.Config
closer func(t *testing.T, dbConn *database.Db) error
output interface{}
}{
{
"SQLite",
database.Config{
Driver: "sqlite",
ConnectionDetails: drivers.ConnectionDetails{Database: path.Join(tempDir, "./testdb.db")},
&database.Config{
Driver: database.DBSQLite3,
ConnectionDetails: drivers.ConnectionDetails{Database: "./testdb.db"},
},
closeDatabase,
nil,
},
{
"SQliteNoDatabase",
database.Config{
Driver: "sqlite",
&database.Config{
Driver: database.DBSQLite3,
ConnectionDetails: drivers.ConnectionDetails{
Host: "localhost",
},
@@ -90,7 +126,7 @@ func TestDatabaseConnect(t *testing.T) {
t.Skip("database not configured skipping test")
}
dbConn, err := connectToDatabase(t, &test.config)
dbConn, err := connectToDatabase(t, test.config)
if err != nil {
switch v := test.output.(type) {
case error:
@@ -113,26 +149,28 @@ func TestDatabaseConnect(t *testing.T) {
}
}
func connectToDatabase(t *testing.T, conn *database.Config) (dbConn *database.Database, err error) {
func connectToDatabase(t *testing.T, conn *database.Config) (dbConn *database.Db, err error) {
t.Helper()
database.Conn.Config = conn
database.DB.Config = conn
if conn.Driver == "postgres" {
dbConn, err = dbpsql.Connect()
if conn.Driver == database.DBPostgreSQL {
dbConn, err = psqlConn.Connect()
if err != nil {
return
return nil, err
}
} else if conn.Driver == "sqlite" {
dbConn, err = dbsqlite.Connect()
} else if conn.Driver == database.DBSQLite3 || conn.Driver == database.DBSQLite {
database.DB.DataPath = tempDir
dbConn, err = sqliteConn.Connect()
if err != nil {
return
return nil, err
}
}
database.Conn.Connected = true
database.DB.Connected = true
return
}
func closeDatabase(t *testing.T, conn *database.Database) (err error) {
func closeDatabase(t *testing.T, conn *database.Db) (err error) {
t.Helper()
if conn != nil {

View File

@@ -7,17 +7,14 @@ import (
"time"
"github.com/thrasher-corp/gocryptotrader/database"
db "github.com/thrasher-corp/gocryptotrader/database/drivers/postgres"
dbsqlite3 "github.com/thrasher-corp/gocryptotrader/database/drivers/sqlite"
mg "github.com/thrasher-corp/gocryptotrader/database/migration"
"github.com/thrasher-corp/gocryptotrader/database/repository/audit"
auditPSQL "github.com/thrasher-corp/gocryptotrader/database/repository/audit/postgres"
auditSQLite "github.com/thrasher-corp/gocryptotrader/database/repository/audit/sqlite"
dbpsql "github.com/thrasher-corp/gocryptotrader/database/drivers/postgres"
dbsqlite3 "github.com/thrasher-corp/gocryptotrader/database/drivers/sqlite3"
log "github.com/thrasher-corp/gocryptotrader/logger"
"github.com/thrasher-corp/sqlboiler/boil"
)
var (
dbConn *database.Database
dbConn *database.Db
)
type databaseManager struct {
@@ -39,53 +36,24 @@ func (a *databaseManager) Start() (err error) {
a.shutdown = make(chan struct{})
if Bot.Config.Database.Enabled {
if Bot.Config.Database.Driver == "postgres" {
dbConn, err = db.Connect()
if err != nil {
return fmt.Errorf("database failed to connect: %v Some features that utilise a database will be unavailable", err)
}
dbConn.SQL.SetMaxOpenConns(2)
dbConn.SQL.SetMaxIdleConns(1)
dbConn.SQL.SetConnMaxLifetime(time.Hour)
audit.Audit = auditPSQL.Audit()
} else if Bot.Config.Database.Driver == "sqlite" {
if Bot.Config.Database.Driver == database.DBPostgreSQL {
log.Debugf(log.DatabaseMgr, "Attempting to establish database connection to host %s/%s utilising %s driver\n",
Bot.Config.Database.Host, Bot.Config.Database.Database, Bot.Config.Database.Driver)
dbConn, err = dbpsql.Connect()
} else if Bot.Config.Database.Driver == database.DBSQLite || Bot.Config.Database.Driver == database.DBSQLite3 {
log.Debugf(log.DatabaseMgr, "Attempting to establish database connection to %s utilising %s driver\n",
Bot.Config.Database.Database, Bot.Config.Database.Driver)
dbConn, err = dbsqlite3.Connect()
if err != nil {
return fmt.Errorf("database failed to connect: %v Some features that utilise a database will be unavailable", err)
}
audit.Audit = auditSQLite.Audit()
}
if err != nil {
return fmt.Errorf("database failed to connect: %v Some features that utilise a database will be unavailable", err)
}
dbConn.Connected = true
if Bot.Config.Database.Driver == "postgres" {
log.Debugf(log.DatabaseMgr,
"Database connection established to host: %s. Using postgres driver\n",
dbConn.Config.Host)
} else {
log.Debugf(log.DatabaseMgr,
"Database connection established to file database: %s. Using sqlite driver\n",
dbConn.Config.Database)
}
mLogger := mg.MLogger{}
migrations := mg.Migrator{
Log: mLogger,
}
migrations.Conn = dbConn
err := migrations.LoadMigrations()
if err != nil {
return err
}
err = migrations.RunMigration()
if err != nil {
return err
DBLogger := database.Logger{}
if Bot.Config.Database.Verbose {
boil.DebugMode = true
boil.DebugWriter = DBLogger
}
go a.run()
@@ -101,10 +69,12 @@ func (a *databaseManager) Stop() error {
}
log.Debugln(log.DatabaseMgr, "Database manager shutting down...")
err := dbConn.SQL.Close()
if err != nil {
log.Errorf(log.DatabaseMgr, "Failed to close database: %v", err)
}
close(a.shutdown)
return nil
}
@@ -114,6 +84,7 @@ func (a *databaseManager) run() {
Bot.ServicesWG.Add(1)
t := time.NewTicker(time.Second * 2)
a.running.Store(true)
defer func() {

View File

@@ -234,6 +234,7 @@ func PrintSettings(s *Settings) {
log.Debugf(log.Global, "\t Enable orderbook syncing: %v", s.EnableOrderbookSyncing)
log.Debugf(log.Global, "\t Enable websocket routine: %v\n", s.EnableWebsocketRoutine)
log.Debugf(log.Global, "\t Enable NTP client: %v", s.EnableNTPClient)
log.Debugf(log.Global, "\t Enable Database manager: %v", s.EnableDatabaseManager)
log.Debugf(log.Global, "\t Enable dispatcher: %v", s.EnableDispatcher)
log.Debugf(log.Global, "\t Dispatch package max worker amount: %d", s.DispatchMaxWorkerAmount)
log.Debugf(log.Global, "- FOREX SETTINGS:")

View File

@@ -15,6 +15,9 @@ import (
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/common/crypto"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database/models/postgres"
"github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/repository/audit"
exchange "github.com/thrasher-corp/gocryptotrader/exchanges"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/orderbook"
@@ -1158,3 +1161,51 @@ func (s *RPCServer) GetExchangeTickerStream(r *gctrpc.GetExchangeTickerStreamReq
}
}
}
// GetAuditEvent returns matching audit events from database
func (s *RPCServer) GetAuditEvent(ctx context.Context, r *gctrpc.GetAuditEventRequest) (*gctrpc.GetAuditEventResponse, error) {
	// Parse both request bounds with the audit table's canonical timestamp
	// layout so malformed input is rejected before querying the database.
	utcStartTime, err := time.Parse(audit.TableTimeFormat, r.StartDate)
	if err != nil {
		return nil, err
	}
	utcEndTime, err := time.Parse(audit.TableTimeFormat, r.EndDate)
	if err != nil {
		return nil, err
	}

	// Zone built from the caller-supplied offset (time.FixedZone takes
	// seconds east of UTC — presumably r.Offset is in seconds; confirm
	// against the client) used to render postgres timestamps locally.
	loc := time.FixedZone("", int(r.Offset))

	events, err := audit.GetEvent(utcStartTime, utcEndTime, r.OrderBy, int(r.Limit))
	if err != nil {
		return nil, err
	}

	resp := gctrpc.GetAuditEventResponse{}
	switch v := events.(type) {
	case postgres.AuditEventSlice:
		resp.Events = make([]*gctrpc.AuditEvent, 0, len(v))
		for x := range v {
			resp.Events = append(resp.Events, &gctrpc.AuditEvent{
				Type:       v[x].Type,
				Identifier: v[x].Identifier,
				Message:    v[x].Message,
				// Postgres stores a time.Time; shift it into the
				// caller's zone before formatting.
				Timestamp: v[x].CreatedAt.In(loc).Format(audit.TableTimeFormat),
			})
		}
	case sqlite3.AuditEventSlice:
		resp.Events = make([]*gctrpc.AuditEvent, 0, len(v))
		for x := range v {
			resp.Events = append(resp.Events, &gctrpc.AuditEvent{
				Type:       v[x].Type,
				Identifier: v[x].Identifier,
				Message:    v[x].Message,
				// sqlite3 CreatedAt is already a string here, so it is
				// passed through verbatim (no zone adjustment applied).
				Timestamp: v[x].CreatedAt,
			})
		}
	}

	return &resp, nil
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -509,6 +509,25 @@ message GetExchangeTickerStreamRequest {
string exchange = 1;
}
message GetAuditEventRequest {
string start_date = 1;
string end_date = 2;
string order_by = 3;
int32 limit = 4;
int32 offset = 5;
}
message GetAuditEventResponse {
repeated audit_event events = 1;
}
message audit_event {
string type = 1 ;
string identifier = 2;
string message = 3;
string timestamp = 4;
}
service GoCryptoTrader {
rpc GetInfo (GetInfoRequest) returns (GetInfoResponse) {
option (google.api.http) = {
@@ -815,4 +834,10 @@ service GoCryptoTrader {
get: "/v1/getexchangetickerstream"
};
}
rpc GetAuditEvent(GetAuditEventRequest) returns (GetAuditEventResponse) {
option (google.api.http) = {
get: "/v1/getauditevent"
};
}
}

View File

@@ -295,6 +295,56 @@
]
}
},
"/v1/getauditevent": {
"get": {
"operationId": "GetAuditEvent",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/gctrpcGetAuditEventResponse"
}
}
},
"parameters": [
{
"name": "start_date",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "end_date",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "order_by",
"in": "query",
"required": false,
"type": "string"
},
{
"name": "limit",
"in": "query",
"required": false,
"type": "integer",
"format": "int32"
},
{
"name": "offset",
"in": "query",
"required": false,
"type": "integer",
"format": "int32"
}
],
"tags": [
"GoCryptoTrader"
]
}
},
"/v1/getcommunicationrelayers": {
"get": {
"operationId": "GetCommunicationRelayers",
@@ -1442,6 +1492,17 @@
}
}
},
"gctrpcGetAuditEventResponse": {
"type": "object",
"properties": {
"events": {
"type": "array",
"items": {
"$ref": "#/definitions/gctrpcaudit_event"
}
}
}
},
"gctrpcGetCommunicationRelayersResponse": {
"type": "object",
"properties": {
@@ -2320,6 +2381,23 @@
}
}
},
"gctrpcaudit_event": {
"type": "object",
"properties": {
"type": {
"type": "string"
},
"identifier": {
"type": "string"
},
"message": {
"type": "string"
},
"timestamp": {
"type": "string"
}
}
},
"protobufAny": {
"type": "object",
"properties": {

17
go.mod
View File

@@ -3,27 +3,26 @@ module github.com/thrasher-corp/gocryptotrader
go 1.12
require (
github.com/cockroachdb/apd v1.1.0 // indirect
github.com/gofrs/uuid v3.2.0+incompatible
github.com/gogo/protobuf v1.2.1 // indirect
github.com/golang/protobuf v1.3.2
github.com/google/go-querystring v1.0.0
github.com/gorilla/mux v1.7.3
github.com/gorilla/websocket v1.4.0
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0
github.com/grpc-ecosystem/grpc-gateway v1.11.3
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
github.com/jackc/pgx v3.5.0+incompatible
github.com/jmoiron/sqlx v1.2.0
github.com/kat-co/vala v0.0.0-20170210184112-42e1d8b61f12
github.com/lib/pq v1.2.0
github.com/mattn/go-sqlite3 v1.11.0
github.com/pkg/errors v0.8.1 // indirect
github.com/pkg/errors v0.8.1
github.com/pquerna/otp v1.2.0
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
github.com/shopspring/decimal v0.0.0-20190905144223-a36b5d85f337 // indirect
github.com/spf13/viper v1.4.0
github.com/thrasher-corp/goose v2.7.0-rc4.0.20191002032028-0f2c2a27abdb+incompatible
github.com/thrasher-corp/sqlboiler v1.0.1-0.20191001234224-71e17f37a85e
github.com/toorop/go-pusher v0.0.0-20180521062818-4521e2eb39fb
github.com/urfave/cli v1.20.0
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5
golang.org/x/net v0.0.0-20190606173856-1492cefac77f
golang.org/x/net v0.0.0-20190606173856-1492cefac77f // indirect
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c // indirect
google.golang.org/genproto v0.0.0-20191002211648-c459b9ce5143
google.golang.org/grpc v1.21.1
gopkg.in/yaml.v2 v2.2.4 // indirect

153
go.sum
View File

@@ -1,29 +1,63 @@
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/apmckinlay/gsuneido v0.0.0-20180907175622-1f10244968e3/go.mod h1:hJnaqxrCRgMCTWtpNz9XUFkBCREiQdlcyK6YNmOfroM=
github.com/apmckinlay/gsuneido v0.0.0-20190404155041-0b6cd442a18f/go.mod h1:JU2DOj5Fc6rol0yaT79Csr47QR0vONGwJtBNGRD7jmc=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20190924004331-208c0a498538/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/ericlagergren/decimal v0.0.0-20180907214518-0bb163153a5d/go.mod h1:1yj25TwtUlJ+pfOu9apAVaM1RWfZGg+aFpd4hPQZekQ=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -33,18 +67,22 @@ github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/grpc-gateway v1.9.2 h1:S+ef0492XaIknb8LMjcwgW2i3cNTzDYMmDrOThOJNWc=
github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.11.3 h1:h8+NsYENhxNTuq+dobk3+ODoJtwY4Fu0WQXsxJfL8aM=
github.com/grpc-ecosystem/grpc-gateway v1.11.3/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
github.com/jackc/pgx v3.5.0+incompatible h1:BRJ4G3UPtvml5R1ey0biqqGuYUGayMYekm3woO75orY=
github.com/jackc/pgx v3.5.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kat-co/vala v0.0.0-20170210184112-42e1d8b61f12 h1:DQVOxR9qdYEybJUr/c7ku34r3PfajaMYXZwgDM7KuSk=
github.com/kat-co/vala v0.0.0-20170210184112-42e1d8b61f12/go.mod h1:u9MdXq/QageOOSGp7qG4XAQsYUMP+V5zEel/Vrl6OOc=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -52,28 +90,92 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok=
github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM=
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/shopspring/decimal v0.0.0-20190905144223-a36b5d85f337 h1:Da9XEUfFxgyDOqUfwgoTDcWzmnlOnCGi6i4iPS+8Fbw=
github.com/shopspring/decimal v0.0.0-20190905144223-a36b5d85f337/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/thrasher-corp/goose v2.7.0-rc4.0.20191002032028-0f2c2a27abdb+incompatible h1:SPqQlzFu3g4P9wK2iwJaWVLJWcQ5rYc43rvXBJ8RSCY=
github.com/thrasher-corp/goose v2.7.0-rc4.0.20191002032028-0f2c2a27abdb+incompatible/go.mod h1:2Bb/y0SpnUWOlPU5kDz+ctvb3w/mzuAVqxy7JPfBzgw=
github.com/thrasher-corp/sqlboiler v1.0.1-0.20191001234224-71e17f37a85e h1:4kYBo2YhqqFY7aZPPEhrtPTMoAq4iCsoDITd3jseRbY=
github.com/thrasher-corp/sqlboiler v1.0.1-0.20191001234224-71e17f37a85e/go.mod h1:JfJE+3gijF30ZJbUCzxGkU0+ymQxBfBOVp4XDObmJBE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/toorop/go-pusher v0.0.0-20180521062818-4521e2eb39fb h1:9kcmLvQdiIecpgVEL3/+J5QIP/ElRBJDljOay0SvqnA=
github.com/toorop/go-pusher v0.0.0-20180521062818-4521e2eb39fb/go.mod h1:VTLqNCX1tXrur6pdIRCl8Q90FR7nw/mEBdyMkWMcsb0=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d h1:gI4/tqP6lCY5k6Sg+4k9qSoBXmPwG+xXgMpK7jivD4M=
github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d/go.mod h1:jspfvgf53t5NLUT4o9L1IX0kIBNKamGq1tWc/MgWK9Q=
github.com/volatiletech/null v8.0.0+incompatible h1:7wP8m5d/gZ6kW/9GnrLtMCRre2dlEnaQ9Km5OXlK4zg=
github.com/volatiletech/null v8.0.0+incompatible/go.mod h1:0wD98JzdqB+rLyZ70fN05VDbXbafIb0KU0MdVhCzmOQ=
github.com/volatiletech/sqlboiler v3.5.0+incompatible h1:n160O7UQLpZVRnJY6VH5eRNkt7sQdQBZGCCZ3CUy1+g=
github.com/volatiletech/sqlboiler v3.5.0+incompatible/go.mod h1:jLfDkkHWPbS2cWRLkyC20vQWaIQsASEY7gM7zSo11Yw=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -82,24 +184,38 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190606173856-1492cefac77f h1:IWHgpgFqnL5AhBUBZSgBdjl2vkQUEzcY+JNKWfcgAU0=
golang.org/x/net v0.0.0-20190606173856-1492cefac77f/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190927073244-c990c680b611 h1:q9u40nxWT5zRClI/uU9dHCiYGottAg6Nzz4YUQyHxdA=
golang.org/x/sys v0.0.0-20190927073244-c990c680b611/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c h1:6Zx7DRlKXf79yfxuQ/7GqV3w2y7aDsk6bGg0MzF5RVU=
golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -107,19 +223,22 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190605220351-eb0b1bdb6ae6 h1:XRqWpmQ5ACYxWuYX495S0sHawhPGOVrh62WzgXsQnWs=
google.golang.org/genproto v0.0.0-20190605220351-eb0b1bdb6ae6/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20191002211648-c459b9ce5143 h1:tikhlQEJeezbnu0Zcblj7g5vm/L7xt6g1vnfq8mRCS4=
google.golang.org/genproto v0.0.0-20191002211648-c459b9ce5143/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7 h1:+t9dhfO+GNOIGJof6kPOAenx7YgrZMTdRPV+EsnPabk=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -9,7 +9,6 @@ import (
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/core"
mg "github.com/thrasher-corp/gocryptotrader/database/migration"
"github.com/thrasher-corp/gocryptotrader/dispatch"
"github.com/thrasher-corp/gocryptotrader/engine"
"github.com/thrasher-corp/gocryptotrader/exchanges/request"
@@ -25,7 +24,6 @@ func main() {
// Core settings
flag.StringVar(&settings.ConfigFile, "config", "", "config file to load")
flag.StringVar(&settings.DataDir, "datadir", common.GetDefaultDataDir(runtime.GOOS), "default data directory for GoCryptoTrader files")
flag.StringVar(&settings.MigrationDir, "migrationdir", mg.MigrationDir, "override migration folder")
flag.IntVar(&settings.GoMaxProcs, "gomaxprocs", runtime.GOMAXPROCS(-1), "sets the runtime GOMAXPROCS value")
flag.BoolVar(&settings.EnableDryRun, "dryrun", false, "dry runs bot, doesn't save config file")
flag.BoolVar(&settings.EnableAllExchanges, "enableallexchanges", false, "enables all exchanges")

34
sqlboiler_example.json Normal file
View File

@@ -0,0 +1,34 @@
{
"psql": {
"dbname": "",
"host": "",
"port": 5432,
"user": "",
"pass": "",
"schema": "public",
"sslmode": "disable",
"blacklist": [
"goose_db_version"
]
},
"mysql": {
"dbname": "",
"host": "",
"port": 3306,
"user": "",
"pass": "",
"sslmode": "false"
},
"mssql": {
"dbname": "",
"host": "",
"port": 1433,
"user": "",
"pass": "",
"sslmode": "disable",
"schema": ""
},
"sqlite": {
"dbname": "/.gocryptotrader/database/gocryptotrader.db"
}
}