Feature: Data history manager engine subsystem (#693)

* Adds lovely initial concept for historical data doer

* Adds ability to save tasks. Adds config. Adds startStop to engine

* Has a database microservice without use of globals! Further infrastructure design. Adds readme

* Commentary to help design

* Adds migrations for database

* readme and adds database models

* Some modelling that doesn't work end of day

* Completes datahistoryjob sql.Begins datahistoryjobresult

* Adds datahistoryjob functions to retrieve job results. Adapts subsystem

* Adds process for upserting jobs and job results to the database

* Broken end of day weird sqlboiler crap

* Fixes issue with SQL generation.

* RPC generation and addition of basic upsert command

* Renames types

* Adds rpc functions

* quick commit before context switch. Exchanges aren't being populated

* Begin the tests!

* complete sql tests. stop failed jobs. CLI command creation

* Defines rpc commands

* Fleshes out RPC implementation

* Expands testing

* Expands testing, removes double remove

* Adds coverage of data history subsystem, expands errors and nil checks

* Minor logic improvement

* streamlines datahistory test setup

* End of day minor linting

* Lint, convert simplify, rpc expansion, type expansion, readme expansion

* Documentation update

* Renames for consistency

* Completes RPC server commands

* Fixes tests

* Speeds up testing by reducing unnecessary actions. Adds maxjobspercycle config

* Comments for everything

* Adds missing result string. checks interval supported. default start end cli

* Fixes ID problem. Improves binance trade fetch. job ranges are processed

* adds dbservice coverage. adds rpcserver coverage

* docs regen, uses dbcon interface, reverts binance, fixes races, toggle manager

* Speed up tests, remove bad global usage, fix uuid check

* Adds verbose. Updates docs. Fixes postgres

* Minor changes to logging and start stop

* Fixes postgres db tests, fixes postgres column typo

* Fixes old string typo, removes constraint, error parsing for nonreaders

* prevents dhm running when table doesn't exist. Adds prereq documentation

* Adds parallel, rmlines, err fix, comment fix, minor param fixes

* doc regen, common time range check and test updating

* Fixes job validation issues. Updates candle range checker.

* Ensures test cannot fail due to time.Now() shenanigans

* Fixes oopsie, adds documentation and a warn

* Fixes another time test, adjusts copy

* Drastically speeds up data history manager tests via function overrides

* Fixes summary bug and better logs

* Fixes local time test, fixes websocket tests

* removes defaults and comment, updates error messages, sets cli command args

* Fixes FTX trade processing

* Fixes issue where jobs got stuck if data wasn't returned but retrieval was successful

* Improves test speed. Simplifies trade verification SQL. Adds command help

* Fixes the oopsies

* Fixes use of query within transaction. Fixes trade err

* oopsie, not needed

* Adds missing data status. Properly ends job even when data is missing

* errors are more verbose and so have more words to describe them

* Doc regen for new status

* tiny test tinkering

* str := string("Removes .String()").String()

* Merge fixups

* Fixes a data race discovered during github actions

* Allows websocket test to pass consistently

* Fixes merge issue preventing datahistorymanager from starting via config

* Niterinos cmd defaults and explanations

* fixes default oopsie

* Fixes lack of nil protection

* Additional oopsie

* More detailed error for validating job exchange
This commit is contained in:
Scott
2021-07-01 16:21:48 +10:00
committed by GitHub
parent c109cfb6b4
commit 197ef2df21
133 changed files with 17770 additions and 1367 deletions

View File

@@ -2,46 +2,52 @@ package database
import (
"database/sql"
"fmt"
"time"
"github.com/thrasher-corp/sqlboiler/boil"
)
// SetConfig safely sets the global database instance's config with some
// basic locks and checks
func (i *Instance) SetConfig(cfg *Config) error {
if i == nil {
return errNilInstance
return ErrNilInstance
}
if cfg == nil {
return errNilConfig
}
i.m.Lock()
i.config = cfg
if i.config.Verbose {
boil.DebugMode = true
boil.DebugWriter = Logger{}
} else {
boil.DebugMode = false
}
i.m.Unlock()
return nil
}
// SetSQLiteConnection safely sets the global database instance's connection
// to use SQLite
func (i *Instance) SetSQLiteConnection(con *sql.DB) {
func (i *Instance) SetSQLiteConnection(con *sql.DB) error {
if i == nil {
return ErrNilInstance
}
if con == nil {
return errNilSQL
}
i.m.Lock()
defer i.m.Unlock()
i.SQL = con
i.SQL.SetMaxOpenConns(1)
return nil
}
// SetPostgresConnection safely sets the global database instance's connection
// to use Postgres
func (i *Instance) SetPostgresConnection(con *sql.DB) error {
if i == nil {
return ErrNilInstance
}
if con == nil {
return errNilSQL
}
if err := con.Ping(); err != nil {
return err
return fmt.Errorf("%w %s", errFailedPing, err)
}
i.m.Lock()
defer i.m.Unlock()
@@ -55,6 +61,9 @@ func (i *Instance) SetPostgresConnection(con *sql.DB) error {
// SetConnected safely sets the global database instance's connected
// status
func (i *Instance) SetConnected(v bool) {
if i == nil {
return
}
i.m.Lock()
i.connected = v
i.m.Unlock()
@@ -62,13 +71,23 @@ func (i *Instance) SetConnected(v bool) {
// CloseConnection safely disconnects the global database instance
func (i *Instance) CloseConnection() error {
if i == nil {
return ErrNilInstance
}
if i.SQL == nil {
return errNilSQL
}
i.m.Lock()
defer i.m.Unlock()
return i.SQL.Close()
}
// IsConnected safely checks the SQL connection status
func (i *Instance) IsConnected() bool {
if i == nil {
return false
}
i.m.RLock()
defer i.m.RUnlock()
return i.connected
@@ -76,6 +95,9 @@ func (i *Instance) IsConnected() bool {
// GetConfig safely returns a copy of the config
func (i *Instance) GetConfig() *Config {
if i == nil {
return nil
}
i.m.RLock()
defer i.m.RUnlock()
cpy := i.config
@@ -85,7 +107,10 @@ func (i *Instance) GetConfig() *Config {
// Ping pings the database
func (i *Instance) Ping() error {
if i == nil {
return errNilInstance
return ErrNilInstance
}
if !i.IsConnected() {
return ErrDatabaseNotConnected
}
i.m.RLock()
defer i.m.RUnlock()
@@ -94,3 +119,17 @@ func (i *Instance) Ping() error {
}
return i.SQL.Ping()
}
// GetSQL returns the sql connection
func (i *Instance) GetSQL() (*sql.DB, error) {
if i == nil {
return nil, ErrNilInstance
}
if i.SQL == nil {
return nil, errNilSQL
}
i.m.Lock()
defer i.m.Unlock()
resp := i.SQL
return resp, nil
}

213
database/database_test.go Normal file
View File

@@ -0,0 +1,213 @@
package database
import (
"database/sql"
"errors"
"os"
"path/filepath"
"testing"
// import sqlite3 driver
_ "github.com/mattn/go-sqlite3"
)
func TestSetConfig(t *testing.T) {
t.Parallel()
inst := &Instance{}
err := inst.SetConfig(&Config{Verbose: true})
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = inst.SetConfig(nil)
if !errors.Is(err, errNilConfig) {
t.Errorf("received %v, expected %v", err, errNilConfig)
}
inst = nil
err = inst.SetConfig(&Config{})
if !errors.Is(err, ErrNilInstance) {
t.Errorf("received %v, expected %v", err, ErrNilInstance)
}
}
func TestSetSQLiteConnection(t *testing.T) {
t.Parallel()
inst := &Instance{}
err := inst.SetSQLiteConnection(nil)
if !errors.Is(err, errNilSQL) {
t.Errorf("received %v, expected %v", err, errNilSQL)
}
err = inst.SetSQLiteConnection(&sql.DB{})
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
inst = nil
err = inst.SetSQLiteConnection(nil)
if !errors.Is(err, ErrNilInstance) {
t.Errorf("received %v, expected %v", err, ErrNilInstance)
}
}
func TestSetPostgresConnection(t *testing.T) {
// there is nothing actually requiring a postgres connection specifically
// so this is testing the checks and the ability to set values
// however, such settings would be bad for a sqlite connection irl
t.Parallel()
inst := &Instance{}
databaseFullLocation := filepath.Join(DB.DataPath, "TestSetPostgresConnection")
con, err := sql.Open("sqlite3", databaseFullLocation)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = inst.SetPostgresConnection(con)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = con.Close()
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = os.Remove(databaseFullLocation)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
}
func TestSetConnected(t *testing.T) {
t.Parallel()
inst := &Instance{}
inst.SetConnected(true)
if !inst.connected {
t.Errorf("received %v, expected %v", false, true)
}
inst.SetConnected(false)
if inst.connected {
t.Errorf("received %v, expected %v", true, false)
}
}
func TestCloseConnection(t *testing.T) {
t.Parallel()
inst := &Instance{}
databaseFullLocation := filepath.Join(DB.DataPath, "TestCloseConnection")
con, err := sql.Open("sqlite3", databaseFullLocation)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = inst.SetSQLiteConnection(con)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = inst.CloseConnection()
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
}
func TestIsConnected(t *testing.T) {
t.Parallel()
inst := &Instance{}
inst.SetConnected(true)
if !inst.IsConnected() {
t.Errorf("received %v, expected %v", false, true)
}
inst.SetConnected(false)
if inst.IsConnected() {
t.Errorf("received %v, expected %v", true, false)
}
}
func TestGetConfig(t *testing.T) {
t.Parallel()
inst := &Instance{}
cfg := inst.GetConfig()
if cfg != nil {
t.Errorf("received %v, expected %v", cfg, nil)
}
err := inst.SetConfig(&Config{Enabled: true})
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
cfg = inst.GetConfig()
if cfg == nil {
t.Errorf("received %v, expected %v", cfg, &Config{Enabled: true})
}
}
func TestPing(t *testing.T) {
t.Parallel()
inst := &Instance{}
databaseFullLocation := filepath.Join(DB.DataPath, "TestPing")
con, err := sql.Open("sqlite3", databaseFullLocation)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = inst.SetSQLiteConnection(con)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
inst.SetConnected(true)
err = inst.Ping()
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
inst.SQL = nil
err = inst.Ping()
if !errors.Is(err, errNilSQL) {
t.Errorf("received %v, expected %v", err, errNilSQL)
}
inst.SetConnected(false)
err = inst.Ping()
if !errors.Is(err, ErrDatabaseNotConnected) {
t.Errorf("received %v, expected %v", err, ErrDatabaseNotConnected)
}
inst = nil
err = inst.Ping()
if !errors.Is(err, ErrNilInstance) {
t.Errorf("received %v, expected %v", err, ErrNilInstance)
}
err = con.Close()
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = os.Remove(databaseFullLocation)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
}
func TestGetSQL(t *testing.T) {
t.Parallel()
inst := &Instance{}
_, err := inst.GetSQL()
if !errors.Is(err, errNilSQL) {
t.Errorf("received %v, expected %v", err, errNilSQL)
}
databaseFullLocation := filepath.Join(DB.DataPath, "TestGetSQL")
con, err := sql.Open("sqlite3", databaseFullLocation)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
err = inst.SetSQLiteConnection(con)
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
_, err = inst.GetSQL()
if !errors.Is(err, nil) {
t.Errorf("received %v, expected %v", err, nil)
}
inst = nil
_, err = inst.GetSQL()
if !errors.Is(err, ErrNilInstance) {
t.Errorf("received %v, expected %v", err, ErrNilInstance)
}
}

View File

@@ -1,6 +1,7 @@
package database
import (
"context"
"database/sql"
"errors"
"path/filepath"
@@ -39,11 +40,15 @@ var (
SupportedDrivers = []string{DBSQLite, DBSQLite3, DBPostgreSQL}
// ErrFailedToConnect for when a database fails to connect
ErrFailedToConnect = errors.New("database failed to connect")
// ErrDatabaseNotConnected for when a database is not connected
ErrDatabaseNotConnected = errors.New("database is not connected")
// DefaultSQLiteDatabase is the default sqlite3 database name to use
DefaultSQLiteDatabase = "gocryptotrader.db"
errNilConfig = errors.New("received nil config")
errNilInstance = errors.New("database instance is nil")
errNilSQL = errors.New("database SQL connection is nil")
// ErrNilInstance for when a database is nil
ErrNilInstance = errors.New("database instance is nil")
errNilConfig = errors.New("received nil config")
errNilSQL = errors.New("database SQL connection is nil")
errFailedPing = errors.New("unable to verify database is connected, failed ping")
)
const (
@@ -56,3 +61,23 @@ const (
// DBInvalidDriver const string for invalid driver
DBInvalidDriver = "invalid driver"
)
// IDatabase allows for the passing of a database struct
// without giving the receiver access to all functionality
type IDatabase interface {
IsConnected() bool
GetSQL() (*sql.DB, error)
GetConfig() *Config
}
// ISQL allows for the passing of a SQL connection
// without giving the receiver access to all functionality
type ISQL interface {
BeginTx(context.Context, *sql.TxOptions) (*sql.Tx, error)
Exec(string, ...interface{}) (sql.Result, error)
Query(string, ...interface{}) (*sql.Rows, error)
QueryRow(string, ...interface{}) *sql.Row
ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
QueryRowContext(context.Context, string, ...interface{}) *sql.Row
}

View File

@@ -10,9 +10,7 @@ import (
)
// Connect opens a connection to Postgres database and returns a pointer to database.DB
func Connect() (*database.Instance, error) {
cfg := database.DB.GetConfig()
func Connect(cfg *database.Config) (*database.Instance, error) {
if cfg.SSLMode == "" {
cfg.SSLMode = "disable"
}

View File

@@ -10,20 +10,21 @@ import (
)
// Connect opens a connection to sqlite database and returns a pointer to database.DB
func Connect() (*database.Instance, error) {
cfg := database.DB.GetConfig()
if cfg.Database == "" {
func Connect(db string) (*database.Instance, error) {
if db == "" {
return nil, database.ErrNoDatabaseProvided
}
databaseFullLocation := filepath.Join(database.DB.DataPath, cfg.Database)
databaseFullLocation := filepath.Join(database.DB.DataPath, db)
dbConn, err := sql.Open("sqlite3", databaseFullLocation)
if err != nil {
return nil, err
}
database.DB.SetSQLiteConnection(dbConn)
err = database.DB.SetSQLiteConnection(dbConn)
if err != nil {
return nil, err
}
return database.DB, nil
}

View File

@@ -0,0 +1,39 @@
-- +goose Up
CREATE TABLE IF NOT EXISTS datahistoryjob
(
id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
nickname varchar(255) NOT NULL,
exchange_name_id uuid REFERENCES exchange(id) NOT NULL,
asset varchar NOT NULL,
base varchar(30) NOT NULL,
quote varchar(30) NOT NULL,
start_time TIMESTAMPTZ NOT NULL,
end_time TIMESTAMPTZ NOT NULL,
data_type DOUBLE PRECISION NOT NULL,
interval DOUBLE PRECISION NOT NULL,
request_size DOUBLE PRECISION NOT NULL,
max_retries DOUBLE PRECISION NOT NULL,
batch_count DOUBLE PRECISION NOT NULL,
status DOUBLE PRECISION NOT NULL,
created TIMESTAMPTZ NOT NULL,
CONSTRAINT uniquenickname
unique(nickname),
CONSTRAINT uniqueid
unique(id)
);
CREATE TABLE IF NOT EXISTS datahistoryjobresult
(
id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
job_id uuid NOT NULL REFERENCES datahistoryjob(id) ON DELETE RESTRICT,
result TEXT NULL,
status DOUBLE PRECISION NOT NULL,
interval_start_time TIMESTAMPTZ NOT NULL,
interval_end_time TIMESTAMPTZ NOT NULL,
run_time TIMESTAMPTZ NOT NULL
);
-- +goose Down
DROP TABLE datahistoryjobresult;
DROP TABLE datahistoryjob;

View File

@@ -0,0 +1,40 @@
-- +goose Up
CREATE TABLE datahistoryjob
(
id text NOT NULL primary key,
nickname text NOT NULL,
exchange_name_id text NOT NULL,
asset text NOT NULL,
base text NOT NULL,
quote text NOT NULL,
start_time timestamp NOT NULL,
end_time timestamp NOT NULL,
interval real NOT NULL,
data_type real NOT NULL,
request_size real NOT NULL,
max_retries real NOT NULL,
batch_count real NOT NULL,
status real NOT NULL,
created timestamp NOT NULL default CURRENT_TIMESTAMP,
FOREIGN KEY(exchange_name_id) REFERENCES exchange(id) ON DELETE RESTRICT,
UNIQUE(id) ON CONFLICT REPLACE,
UNIQUE(nickname) ON CONFLICT REPLACE
);
CREATE TABLE datahistoryjobresult
(
id text not null primary key,
job_id text NOT NULL,
result text NULL,
status real NOT NULL,
interval_start_time timestamp NOT NULL,
interval_end_time timestamp NOT NULL,
run_time timestamp NOT NULL default CURRENT_TIMESTAMP,
UNIQUE(id) ON CONFLICT REPLACE,
FOREIGN KEY(job_id) REFERENCES datahistoryjob(id) ON DELETE RESTRICT
);
-- +goose Down
DROP TABLE datahistoryjob;
DROP TABLE datahistoryjobresult;

View File

@@ -4,23 +4,27 @@
package postgres
var TableNames = struct {
AuditEvent string
Candle string
Exchange string
Script string
ScriptExecution string
Trade string
WithdrawalCrypto string
WithdrawalFiat string
WithdrawalHistory string
AuditEvent string
Candle string
Datahistoryjob string
Datahistoryjobresult string
Exchange string
Script string
ScriptExecution string
Trade string
WithdrawalCrypto string
WithdrawalFiat string
WithdrawalHistory string
}{
AuditEvent: "audit_event",
Candle: "candle",
Exchange: "exchange",
Script: "script",
ScriptExecution: "script_execution",
Trade: "trade",
WithdrawalCrypto: "withdrawal_crypto",
WithdrawalFiat: "withdrawal_fiat",
WithdrawalHistory: "withdrawal_history",
AuditEvent: "audit_event",
Candle: "candle",
Datahistoryjob: "datahistoryjob",
Datahistoryjobresult: "datahistoryjobresult",
Exchange: "exchange",
Script: "script",
ScriptExecution: "script_execution",
Trade: "trade",
WithdrawalCrypto: "withdrawal_crypto",
WithdrawalFiat: "withdrawal_fiat",
WithdrawalHistory: "withdrawal_history",
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,994 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
func testDatahistoryjobs(t *testing.T) {
t.Parallel()
query := Datahistoryjobs()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
func testDatahistoryjobsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := Datahistoryjobs().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsExists(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
e, err := DatahistoryjobExists(ctx, tx, o.ID)
if err != nil {
t.Errorf("Unable to check if Datahistoryjob exists: %s", err)
}
if !e {
t.Errorf("Expected DatahistoryjobExists to return true, but got false.")
}
}
func testDatahistoryjobsFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
datahistoryjobFound, err := FindDatahistoryjob(ctx, tx, o.ID)
if err != nil {
t.Error(err)
}
if datahistoryjobFound == nil {
t.Error("want a record, got nil")
}
}
func testDatahistoryjobsBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = Datahistoryjobs().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
func testDatahistoryjobsOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := Datahistoryjobs().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
func testDatahistoryjobsAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
datahistoryjobOne := &Datahistoryjob{}
datahistoryjobTwo := &Datahistoryjob{}
if err = randomize.Struct(seed, datahistoryjobOne, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobTwo, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobs().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 2 {
t.Error("want 2 records, got:", len(slice))
}
}
func testDatahistoryjobsCount(t *testing.T) {
t.Parallel()
var err error
seed := randomize.NewSeed()
datahistoryjobOne := &Datahistoryjob{}
datahistoryjobTwo := &Datahistoryjob{}
if err = randomize.Struct(seed, datahistoryjobOne, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobTwo, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 2 {
t.Error("want 2 records, got:", count)
}
}
func datahistoryjobBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func testDatahistoryjobsHooks(t *testing.T) {
t.Parallel()
var err error
ctx := context.Background()
empty := &Datahistoryjob{}
o := &Datahistoryjob{}
seed := randomize.NewSeed()
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, false); err != nil {
t.Errorf("Unable to randomize Datahistoryjob object: %s", err)
}
AddDatahistoryjobHook(boil.BeforeInsertHook, datahistoryjobBeforeInsertHook)
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeInsertHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterInsertHook, datahistoryjobAfterInsertHook)
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterInsertHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterSelectHook, datahistoryjobAfterSelectHook)
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterSelectHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.BeforeUpdateHook, datahistoryjobBeforeUpdateHook)
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeUpdateHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterUpdateHook, datahistoryjobAfterUpdateHook)
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterUpdateHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.BeforeDeleteHook, datahistoryjobBeforeDeleteHook)
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeDeleteHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterDeleteHook, datahistoryjobAfterDeleteHook)
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterDeleteHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.BeforeUpsertHook, datahistoryjobBeforeUpsertHook)
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeUpsertHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterUpsertHook, datahistoryjobAfterUpsertHook)
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterUpsertHooks = []DatahistoryjobHook{}
}
func testDatahistoryjobsInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testDatahistoryjobsInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(datahistoryjobColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
// testDatahistoryjobToManyJobDatahistoryjobresults checks the to-many
// relationship from a Datahistoryjob to its Datahistoryjobresults: two results
// pointing at one job are queryable via JobDatahistoryjobresults(), and both
// slice-based and singular eager loading populate R.JobDatahistoryjobresults.
func testDatahistoryjobToManyJobDatahistoryjobresults(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjob
var b, c Datahistoryjobresult
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Fatal(err)
}
// Point both results at the inserted job so the relationship exists.
b.JobID = a.ID
c.JobID = a.ID
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
check, err := a.JobDatahistoryjobresults().All(ctx, tx)
if err != nil {
t.Fatal(err)
}
bFound, cFound := false, false
for _, v := range check {
if v.JobID == b.JobID {
bFound = true
}
if v.JobID == c.JobID {
cFound = true
}
}
if !bFound {
t.Error("expected to find b")
}
if !cFound {
t.Error("expected to find c")
}
// Eager-load via the slice form, then again via the singular form.
slice := DatahistoryjobSlice{&a}
if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.JobDatahistoryjobresults); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
a.R.JobDatahistoryjobresults = nil
if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, true, &a, nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.JobDatahistoryjobresults); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
if t.Failed() {
t.Logf("%#v", check)
}
}
// testDatahistoryjobToManyAddOpJobDatahistoryjobresults verifies
// AddJobDatahistoryjobresults in two batches: the first against already-inserted
// foreign rows (b, c), the second (i != 0) inserting d and e itself. After each
// batch it checks the FK values, both sides of the R struct, and the row count.
func testDatahistoryjobToManyAddOpJobDatahistoryjobresults(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjob
var b, c, d, e Datahistoryjobresult
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
foreigners := []*Datahistoryjobresult{&b, &c, &d, &e}
for _, x := range foreigners {
if err = randomize.Struct(seed, x, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
// b and c are pre-inserted; d and e are left for AddJobDatahistoryjobresults
// to insert on the second pass.
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
foreignersSplitByInsertion := [][]*Datahistoryjobresult{
{&b, &c},
{&d, &e},
}
for i, x := range foreignersSplitByInsertion {
err = a.AddJobDatahistoryjobresults(ctx, tx, i != 0, x...)
if err != nil {
t.Fatal(err)
}
first := x[0]
second := x[1]
if a.ID != first.JobID {
t.Error("foreign key was wrong value", a.ID, first.JobID)
}
if a.ID != second.JobID {
t.Error("foreign key was wrong value", a.ID, second.JobID)
}
if first.R.Job != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if second.R.Job != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if a.R.JobDatahistoryjobresults[i*2] != first {
t.Error("relationship struct slice not set to correct value")
}
if a.R.JobDatahistoryjobresults[i*2+1] != second {
t.Error("relationship struct slice not set to correct value")
}
count, err := a.JobDatahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Fatal(err)
}
if want := int64((i + 1) * 2); count != want {
t.Error("want", want, "got", count)
}
}
}
// testDatahistoryjobToOneExchangeUsingExchangeName checks the to-one relation
// from Datahistoryjob to Exchange via ExchangeNameID: ExchangeName() resolves
// to the inserted foreign row, and both slice-based and singular eager loading
// populate R.ExchangeName.
func testDatahistoryjobToOneExchangeUsingExchangeName(t *testing.T) {
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var local Datahistoryjob
var foreign Exchange
seed := randomize.NewSeed()
if err := randomize.Struct(seed, &local, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err := randomize.Struct(seed, &foreign, exchangeDBTypes, false, exchangeColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Exchange struct: %s", err)
}
if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
local.ExchangeNameID = foreign.ID
if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
check, err := local.ExchangeName().One(ctx, tx)
if err != nil {
t.Fatal(err)
}
if check.ID != foreign.ID {
t.Errorf("want: %v, got %v", foreign.ID, check.ID)
}
slice := DatahistoryjobSlice{&local}
if err = local.L.LoadExchangeName(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
t.Fatal(err)
}
if local.R.ExchangeName == nil {
t.Error("struct should have been eager loaded")
}
local.R.ExchangeName = nil
if err = local.L.LoadExchangeName(ctx, tx, true, &local, nil); err != nil {
t.Fatal(err)
}
if local.R.ExchangeName == nil {
t.Error("struct should have been eager loaded")
}
}
// testDatahistoryjobToOneSetOpExchangeUsingExchangeName verifies SetExchangeName
// against an already-inserted Exchange (b) and an uninserted one (c, i != 0):
// both relationship structs are wired, the FK column is written, and the value
// survives zeroing the field in memory followed by a Reload from the database.
func testDatahistoryjobToOneSetOpExchangeUsingExchangeName(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjob
var b, c Exchange
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
for i, x := range []*Exchange{&b, &c} {
err = a.SetExchangeName(ctx, tx, i != 0, x)
if err != nil {
t.Fatal(err)
}
if a.R.ExchangeName != x {
t.Error("relationship struct not set to correct value")
}
if x.R.ExchangeNameDatahistoryjobs[0] != &a {
t.Error("failed to append to foreign relationship struct")
}
if a.ExchangeNameID != x.ID {
t.Error("foreign key was wrong value", a.ExchangeNameID)
}
// Zero the FK in memory, then Reload to prove it was persisted.
zero := reflect.Zero(reflect.TypeOf(a.ExchangeNameID))
reflect.Indirect(reflect.ValueOf(&a.ExchangeNameID)).Set(zero)
if err = a.Reload(ctx, tx); err != nil {
t.Fatal("failed to reload", err)
}
if a.ExchangeNameID != x.ID {
t.Error("foreign key was wrong value", a.ExchangeNameID, x.ID)
}
}
}
// testDatahistoryjobsReload inserts a randomized row and verifies Reload
// succeeds against it. The transaction is rolled back on exit.
func testDatahistoryjobsReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
// testDatahistoryjobsReloadAll inserts a randomized row and verifies
// DatahistoryjobSlice.ReloadAll succeeds on a one-element slice.
func testDatahistoryjobsReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
// testDatahistoryjobsSelect inserts one randomized row and verifies
// Datahistoryjobs().All returns exactly that one record.
func testDatahistoryjobsSelect(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobs().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 1 {
t.Error("want one record, got:", len(slice))
}
}
var (
// datahistoryjobDBTypes maps Datahistoryjob field names to their Postgres
// column types; it is passed to randomize.Struct throughout this file so
// generated values fit each column.
datahistoryjobDBTypes = map[string]string{`ID`: `uuid`, `Nickname`: `character varying`, `ExchangeNameID`: `uuid`, `Asset`: `character varying`, `Base`: `character varying`, `Quote`: `character varying`, `StartTime`: `timestamp with time zone`, `EndTime`: `timestamp with time zone`, `DataType`: `double precision`, `Interval`: `double precision`, `RequestSize`: `double precision`, `MaxRetries`: `double precision`, `BatchCount`: `double precision`, `Status`: `double precision`, `Created`: `timestamp with time zone`}
// Keep the bytes import referenced even if nothing else in this file uses it.
_ = bytes.MinRead
)
// testDatahistoryjobsUpdate inserts a randomized Datahistoryjob, re-randomizes
// the struct (primary-key columns excluded so the UPDATE still targets the
// inserted row), and verifies Update reports exactly one affected row. The
// transaction is rolled back on exit so the database is left untouched.
func testDatahistoryjobsUpdate(t *testing.T) {
	t.Parallel()
	// len(...) == 0 rather than the Yoda-style 0 == len(...): idiomatic Go
	// places the expression under test on the left.
	if len(datahistoryjobPrimaryKeyColumns) == 0 {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjob{}
	if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testDatahistoryjobsSliceUpdateAll inserts a randomized row, re-randomizes it,
// builds an update map of the non-primary-key columns via reflection on the
// struct's `boil` tags, and verifies slice.UpdateAll affects exactly one row.
func testDatahistoryjobsSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(datahistoryjobAllColumns, datahistoryjobPrimaryKeyColumns) {
fields = datahistoryjobAllColumns
} else {
fields = strmangle.SetComplement(
datahistoryjobAllColumns,
datahistoryjobPrimaryKeyColumns,
)
}
// Map each remaining column name to the struct field value whose `boil`
// tag matches it, producing the column->value map UpdateAll expects.
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := DatahistoryjobSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}
// testDatahistoryjobsUpsert exercises both halves of Upsert: first an insert
// (updateOnConflict=false), then an update of the same row after re-randomizing
// the non-primary-key columns (updateOnConflict=true). The row count must stay
// at 1 after each call.
func testDatahistoryjobsUpsert(t *testing.T) {
t.Parallel()
if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
// Attempt the INSERT side of an UPSERT
o := Datahistoryjob{}
if err = randomize.Struct(seed, &o, datahistoryjobDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert Datahistoryjob: %s", err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Attempt the UPDATE side of an UPSERT
if err = randomize.Struct(seed, &o, datahistoryjobDBTypes, false, datahistoryjobPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert Datahistoryjob: %s", err)
}
count, err = Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}

// NOTE(review): the lines below are scraped diff-viewer artifacts, not source
// code — a second generated file (the datahistoryjobresult test suite) begins
// after them and should live in its own file.
// File diff suppressed because it is too large — Load Diff
// View File
// @@ -0,0 +1,841 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
// (Without this reference the queries import would be unused and fail to compile.)
_ = queries.Equal
)
// testDatahistoryjobresults verifies the base query constructor returns a
// non-nil query object.
func testDatahistoryjobresults(t *testing.T) {
t.Parallel()
query := Datahistoryjobresults()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
// testDatahistoryjobresultsDelete inserts a randomized row, deletes it via the
// object method, and verifies one row was affected and the table is empty.
func testDatahistoryjobresultsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobresultsQueryDeleteAll inserts a randomized row, deletes via
// the query-level DeleteAll, and verifies the affected count and empty table.
func testDatahistoryjobresultsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := Datahistoryjobresults().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobresultsSliceDeleteAll inserts a randomized row, deletes via
// the slice-level DeleteAll, and verifies the affected count and empty table.
func testDatahistoryjobresultsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobresultSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobresultsExists inserts a randomized row and verifies
// DatahistoryjobresultExists reports true for its ID.
func testDatahistoryjobresultsExists(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
e, err := DatahistoryjobresultExists(ctx, tx, o.ID)
if err != nil {
t.Errorf("Unable to check if Datahistoryjobresult exists: %s", err)
}
if !e {
t.Errorf("Expected DatahistoryjobresultExists to return true, but got false.")
}
}
// testDatahistoryjobresultsFind inserts a randomized row and verifies
// FindDatahistoryjobresult retrieves a non-nil record by ID.
func testDatahistoryjobresultsFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
datahistoryjobresultFound, err := FindDatahistoryjobresult(ctx, tx, o.ID)
if err != nil {
t.Error(err)
}
if datahistoryjobresultFound == nil {
t.Error("want a record, got nil")
}
}
// testDatahistoryjobresultsBind inserts a randomized row and verifies the query
// result can be bound back into a Datahistoryjobresult struct without error.
func testDatahistoryjobresultsBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = Datahistoryjobresults().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
// testDatahistoryjobresultsOne inserts a randomized row and verifies One
// returns a non-nil record.
func testDatahistoryjobresultsOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := Datahistoryjobresults().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
// testDatahistoryjobresultsAll inserts two randomized rows and verifies All
// returns both.
func testDatahistoryjobresultsAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
datahistoryjobresultOne := &Datahistoryjobresult{}
datahistoryjobresultTwo := &Datahistoryjobresult{}
if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobresults().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 2 {
t.Error("want 2 records, got:", len(slice))
}
}
// testDatahistoryjobresultsCount inserts two randomized rows and verifies Count
// reports 2.
func testDatahistoryjobresultsCount(t *testing.T) {
t.Parallel()
var err error
seed := randomize.NewSeed()
datahistoryjobresultOne := &Datahistoryjobresult{}
datahistoryjobresultTwo := &Datahistoryjobresult{}
if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 2 {
t.Error("want 2 records, got:", count)
}
}
// Each hook below zeroes the passed Datahistoryjobresult. The hooks test
// registers them one at a time and checks the object was emptied, proving the
// corresponding hook point actually fires. The ctx/executor params are unused.
func datahistoryjobresultBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
// testDatahistoryjobresultsHooks registers each object-zeroing hook in turn,
// invokes the matching doXxxHooks method with a nil executor, and verifies the
// randomized object was emptied (i.e. the hook ran). After each check the
// package-level hook slice is reset so later assertions start clean.
func testDatahistoryjobresultsHooks(t *testing.T) {
t.Parallel()
var err error
ctx := context.Background()
empty := &Datahistoryjobresult{}
o := &Datahistoryjobresult{}
seed := randomize.NewSeed()
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, false); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult object: %s", err)
}
AddDatahistoryjobresultHook(boil.BeforeInsertHook, datahistoryjobresultBeforeInsertHook)
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
}
// Deregister so the next hook check is unaffected.
datahistoryjobresultBeforeInsertHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterInsertHook, datahistoryjobresultAfterInsertHook)
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterInsertHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterSelectHook, datahistoryjobresultAfterSelectHook)
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterSelectHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.BeforeUpdateHook, datahistoryjobresultBeforeUpdateHook)
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeUpdateHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterUpdateHook, datahistoryjobresultAfterUpdateHook)
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterUpdateHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.BeforeDeleteHook, datahistoryjobresultBeforeDeleteHook)
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeDeleteHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterDeleteHook, datahistoryjobresultAfterDeleteHook)
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterDeleteHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.BeforeUpsertHook, datahistoryjobresultBeforeUpsertHook)
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeUpsertHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterUpsertHook, datahistoryjobresultAfterUpsertHook)
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterUpsertHooks = []DatahistoryjobresultHook{}
}
// testDatahistoryjobresultsInsert inserts one randomized row and verifies the
// table count is exactly 1. The transaction is rolled back on exit.
func testDatahistoryjobresultsInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
// testDatahistoryjobresultsInsertWhitelist inserts a randomized row using an
// explicit column whitelist (non-default columns) and verifies one row exists.
func testDatahistoryjobresultsInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(datahistoryjobresultColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
// testDatahistoryjobresultToOneDatahistoryjobUsingJob checks the to-one
// relation from a result to its parent job via JobID: Job() resolves to the
// inserted foreign row, and both slice-based and singular eager loading
// populate R.Job.
func testDatahistoryjobresultToOneDatahistoryjobUsingJob(t *testing.T) {
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var local Datahistoryjobresult
var foreign Datahistoryjob
seed := randomize.NewSeed()
if err := randomize.Struct(seed, &local, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if err := randomize.Struct(seed, &foreign, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
local.JobID = foreign.ID
if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
check, err := local.Job().One(ctx, tx)
if err != nil {
t.Fatal(err)
}
if check.ID != foreign.ID {
t.Errorf("want: %v, got %v", foreign.ID, check.ID)
}
slice := DatahistoryjobresultSlice{&local}
if err = local.L.LoadJob(ctx, tx, false, (*[]*Datahistoryjobresult)(&slice), nil); err != nil {
t.Fatal(err)
}
if local.R.Job == nil {
t.Error("struct should have been eager loaded")
}
local.R.Job = nil
if err = local.L.LoadJob(ctx, tx, true, &local, nil); err != nil {
t.Fatal(err)
}
if local.R.Job == nil {
t.Error("struct should have been eager loaded")
}
}
// testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob verifies SetJob
// against an already-inserted job (b) and an uninserted one (c, i != 0): both
// relationship structs are wired, JobID is written, and the value survives
// zeroing the field in memory followed by a Reload from the database.
func testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjobresult
var b, c Datahistoryjob
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
for i, x := range []*Datahistoryjob{&b, &c} {
err = a.SetJob(ctx, tx, i != 0, x)
if err != nil {
t.Fatal(err)
}
if a.R.Job != x {
t.Error("relationship struct not set to correct value")
}
if x.R.JobDatahistoryjobresults[0] != &a {
t.Error("failed to append to foreign relationship struct")
}
if a.JobID != x.ID {
t.Error("foreign key was wrong value", a.JobID)
}
// Zero the FK in memory, then Reload to prove it was persisted.
zero := reflect.Zero(reflect.TypeOf(a.JobID))
reflect.Indirect(reflect.ValueOf(&a.JobID)).Set(zero)
if err = a.Reload(ctx, tx); err != nil {
t.Fatal("failed to reload", err)
}
if a.JobID != x.ID {
t.Error("foreign key was wrong value", a.JobID, x.ID)
}
}
}
// testDatahistoryjobresultsReload inserts a randomized row and verifies Reload
// succeeds against it.
func testDatahistoryjobresultsReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
// testDatahistoryjobresultsReloadAll inserts a randomized row and verifies
// slice ReloadAll succeeds on a one-element slice.
func testDatahistoryjobresultsReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobresultSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
// testDatahistoryjobresultsSelect inserts one row and confirms that a bare
// select returns exactly that single record.
func testDatahistoryjobresultsSelect(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	record := &Datahistoryjobresult{}
	if err := randomize.Struct(seed, record, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := record.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	rows, err := Datahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(rows) != 1 {
		t.Error("want one record, got:", len(rows))
	}
}
var (
// datahistoryjobresultDBTypes maps Datahistoryjobresult struct field names to
// their database column types; consumed by randomize.Struct when building fixtures.
datahistoryjobresultDBTypes = map[string]string{`ID`: `uuid`, `JobID`: `uuid`, `Result`: `text`, `Status`: `double precision`, `IntervalStartTime`: `timestamp with time zone`, `IntervalEndTime`: `timestamp with time zone`, `RunTime`: `timestamp with time zone`}
// Force a bytes package dependency in case nothing else in this file uses it.
_ = bytes.MinRead
)
// testDatahistoryjobresultsUpdate inserts a randomized row, re-randomizes its
// non-primary-key columns, and verifies Update affects exactly one row.
func testDatahistoryjobresultsUpdate(t *testing.T) {
	t.Parallel()
	// Idiomatic ordering (len(...) == 0) instead of the Yoda-style 0 == len(...),
	// matching the sibling guard below.
	if len(datahistoryjobresultPrimaryKeyColumns) == 0 {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Re-randomize everything except the primary key so Update has changes to write.
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testDatahistoryjobresultsSliceUpdateAll re-randomizes a stored row's
// non-key columns and verifies Slice.UpdateAll writes them back to exactly
// one row, building the update map via reflection on the struct's boil tags.
func testDatahistoryjobresultsSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Re-randomize every column except the primary key so the update has content.
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(datahistoryjobresultAllColumns, datahistoryjobresultPrimaryKeyColumns) {
fields = datahistoryjobresultAllColumns
} else {
fields = strmangle.SetComplement(
datahistoryjobresultAllColumns,
datahistoryjobresultPrimaryKeyColumns,
)
}
// Map each updatable column name to the struct field value carrying the
// matching `boil` tag, producing the column->value map UpdateAll expects.
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := DatahistoryjobresultSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}
// testDatahistoryjobresultsUpsert exercises both the insert and the update
// paths of Upsert, confirming exactly one row exists after each call.
func testDatahistoryjobresultsUpsert(t *testing.T) {
	t.Parallel()
	if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var record Datahistoryjobresult
	// Attempt the INSERT side of an UPSERT.
	if err := randomize.Struct(seed, &record, datahistoryjobresultDBTypes, true); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	// Shared assertion: the table must hold exactly one record.
	expectSingleRow := func() {
		count, err := Datahistoryjobresults().Count(ctx, tx)
		if err != nil {
			t.Error(err)
		}
		if count != 1 {
			t.Error("want one record, got:", count)
		}
	}

	if err := record.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
		t.Errorf("Unable to upsert Datahistoryjobresult: %s", err)
	}
	expectSingleRow()

	// Attempt the UPDATE side of an UPSERT.
	if err := randomize.Struct(seed, &record, datahistoryjobresultDBTypes, false, datahistoryjobresultPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err := record.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
		t.Errorf("Unable to upsert Datahistoryjobresult: %s", err)
	}
	expectSingleRow()
}

View File

@@ -51,10 +51,12 @@ var ExchangeWhere = struct {
// ExchangeRels is where relationship names are stored.
var ExchangeRels = struct {
ExchangeNameCandles string
ExchangeNameDatahistoryjobs string
ExchangeNameTrades string
ExchangeNameWithdrawalHistories string
}{
ExchangeNameCandles: "ExchangeNameCandles",
ExchangeNameDatahistoryjobs: "ExchangeNameDatahistoryjobs",
ExchangeNameTrades: "ExchangeNameTrades",
ExchangeNameWithdrawalHistories: "ExchangeNameWithdrawalHistories",
}
@@ -62,6 +64,7 @@ var ExchangeRels = struct {
// exchangeR is where relationships are stored.
type exchangeR struct {
ExchangeNameCandles CandleSlice
ExchangeNameDatahistoryjobs DatahistoryjobSlice
ExchangeNameTrades TradeSlice
ExchangeNameWithdrawalHistories WithdrawalHistorySlice
}
@@ -377,6 +380,27 @@ func (o *Exchange) ExchangeNameCandles(mods ...qm.QueryMod) candleQuery {
return query
}
// ExchangeNameDatahistoryjobs retrieves all the datahistoryjob's Datahistoryjobs with an executor via exchange_name_id column.
// Caller-supplied query mods are applied first, then the foreign-key filter.
func (o *Exchange) ExchangeNameDatahistoryjobs(mods ...qm.QueryMod) datahistoryjobQuery {
var queryMods []qm.QueryMod
if len(mods) != 0 {
queryMods = append(queryMods, mods...)
}
// Constrain the query to jobs belonging to this exchange.
queryMods = append(queryMods,
qm.Where("\"datahistoryjob\".\"exchange_name_id\"=?", o.ID),
)
query := Datahistoryjobs(queryMods...)
queries.SetFrom(query.Query, "\"datahistoryjob\"")
// Default to selecting all columns unless the caller set an explicit select.
if len(queries.GetSelect(query.Query)) == 0 {
queries.SetSelect(query.Query, []string{"\"datahistoryjob\".*"})
}
return query
}
// ExchangeNameTrades retrieves all the trade's Trades with an executor via exchange_name_id column.
func (o *Exchange) ExchangeNameTrades(mods ...qm.QueryMod) tradeQuery {
var queryMods []qm.QueryMod
@@ -514,6 +538,101 @@ func (exchangeL) LoadExchangeNameCandles(ctx context.Context, e boil.ContextExec
return nil
}
// LoadExchangeNameDatahistoryjobs allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
func (exchangeL) LoadExchangeNameDatahistoryjobs(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
var slice []*Exchange
var object *Exchange
// maybeExchange is either a single *Exchange (singular) or a *[]*Exchange batch.
if singular {
object = maybeExchange.(*Exchange)
} else {
slice = *maybeExchange.(*[]*Exchange)
}
// Collect the distinct exchange IDs to query against, initializing each
// relationship container (R) along the way.
args := make([]interface{}, 0, 1)
if singular {
if object.R == nil {
object.R = &exchangeR{}
}
args = append(args, object.ID)
} else {
Outer:
for _, obj := range slice {
if obj.R == nil {
obj.R = &exchangeR{}
}
// Skip IDs already queued so the IN clause only holds unique values.
for _, a := range args {
if a == obj.ID {
continue Outer
}
}
args = append(args, obj.ID)
}
}
if len(args) == 0 {
return nil
}
query := NewQuery(qm.From(`datahistoryjob`), qm.WhereIn(`datahistoryjob.exchange_name_id in ?`, args...))
if mods != nil {
mods.Apply(query)
}
results, err := query.QueryContext(ctx, e)
if err != nil {
return errors.Wrap(err, "failed to eager load datahistoryjob")
}
var resultSlice []*Datahistoryjob
if err = queries.Bind(results, &resultSlice); err != nil {
return errors.Wrap(err, "failed to bind eager loaded slice datahistoryjob")
}
if err = results.Close(); err != nil {
return errors.Wrap(err, "failed to close results in eager load on datahistoryjob")
}
if err = results.Err(); err != nil {
return errors.Wrap(err, "error occurred during iteration of eager loaded relations for datahistoryjob")
}
// Run any registered after-select hooks on the freshly loaded rows.
if len(datahistoryjobAfterSelectHooks) != 0 {
for _, obj := range resultSlice {
if err := obj.doAfterSelectHooks(ctx, e); err != nil {
return err
}
}
}
// Singular case: attach every loaded job to the one exchange and back-link it.
if singular {
object.R.ExchangeNameDatahistoryjobs = resultSlice
for _, foreign := range resultSlice {
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.ExchangeName = object
}
return nil
}
// Batch case: match each loaded job to its parent exchange via the foreign key.
for _, foreign := range resultSlice {
for _, local := range slice {
if local.ID == foreign.ExchangeNameID {
local.R.ExchangeNameDatahistoryjobs = append(local.R.ExchangeNameDatahistoryjobs, foreign)
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.ExchangeName = local
break
}
}
}
return nil
}
// LoadExchangeNameTrades allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
func (exchangeL) LoadExchangeNameTrades(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
@@ -757,6 +876,59 @@ func (o *Exchange) AddExchangeNameCandles(ctx context.Context, exec boil.Context
return nil
}
// AddExchangeNameDatahistoryjobs adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameDatahistoryjobs.
// Sets related.R.ExchangeName appropriately.
func (o *Exchange) AddExchangeNameDatahistoryjobs(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Datahistoryjob) error {
var err error
for _, rel := range related {
if insert {
// New record: set the FK first, then insert the whole row.
rel.ExchangeNameID = o.ID
if err = rel.Insert(ctx, exec, boil.Infer()); err != nil {
return errors.Wrap(err, "failed to insert into foreign table")
}
} else {
// Existing record: issue an UPDATE that only rewrites the FK column.
updateQuery := fmt.Sprintf(
"UPDATE \"datahistoryjob\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 1, []string{"exchange_name_id"}),
strmangle.WhereClause("\"", "\"", 2, datahistoryjobPrimaryKeyColumns),
)
values := []interface{}{o.ID, rel.ID}
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, updateQuery)
fmt.Fprintln(boil.DebugWriter, values)
}
if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
return errors.Wrap(err, "failed to update foreign table")
}
// Keep the in-memory struct consistent with the database.
rel.ExchangeNameID = o.ID
}
}
// Cache the forward relationship on the exchange.
if o.R == nil {
o.R = &exchangeR{
ExchangeNameDatahistoryjobs: related,
}
} else {
o.R.ExchangeNameDatahistoryjobs = append(o.R.ExchangeNameDatahistoryjobs, related...)
}
// Cache the reverse relationship on every related job.
for _, rel := range related {
if rel.R == nil {
rel.R = &datahistoryjobR{
ExchangeName: o,
}
} else {
rel.R.ExchangeName = o
}
}
return nil
}
// AddExchangeNameTrades adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameTrades.

View File

@@ -572,6 +572,84 @@ func testExchangeToManyExchangeNameCandles(t *testing.T) {
}
}
// testExchangeToManyExchangeNameDatahistoryjobs verifies the one-to-many
// relationship: two jobs keyed to one exchange are returned by the
// relationship query and by both the batch and singular eager loaders.
func testExchangeToManyExchangeNameDatahistoryjobs(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Exchange
var b, c Datahistoryjob
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, exchangeDBTypes, true, exchangeColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Exchange struct: %s", err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Fatal(err)
}
// Point both jobs at the inserted exchange before saving them.
b.ExchangeNameID = a.ID
c.ExchangeNameID = a.ID
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
// The relationship query must return both related jobs.
check, err := a.ExchangeNameDatahistoryjobs().All(ctx, tx)
if err != nil {
t.Fatal(err)
}
bFound, cFound := false, false
for _, v := range check {
if v.ExchangeNameID == b.ExchangeNameID {
bFound = true
}
if v.ExchangeNameID == c.ExchangeNameID {
cFound = true
}
}
if !bFound {
t.Error("expected to find b")
}
if !cFound {
t.Error("expected to find c")
}
// Batch (slice) eager load.
slice := ExchangeSlice{&a}
if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, false, (*[]*Exchange)(&slice), nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
// Singular eager load after clearing the cached relationship.
a.R.ExchangeNameDatahistoryjobs = nil
if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, true, &a, nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
if t.Failed() {
t.Logf("%#v", check)
}
}
func testExchangeToManyExchangeNameTrades(t *testing.T) {
var err error
ctx := context.Background()
@@ -803,6 +881,81 @@ func testExchangeToManyAddOpExchangeNameCandles(t *testing.T) {
}
}
}
// testExchangeToManyAddOpExchangeNameDatahistoryjobs exercises
// AddExchangeNameDatahistoryjobs on both paths: attaching already-inserted
// jobs (b, c) and inserting new ones (d, e), checking FKs, cached
// relationship structs, and the running row count after each pass.
func testExchangeToManyAddOpExchangeNameDatahistoryjobs(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Exchange
var b, c, d, e Datahistoryjob
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
foreigners := []*Datahistoryjob{&b, &c, &d, &e}
for _, x := range foreigners {
if err = randomize.Struct(seed, x, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
// Pre-insert b and c; d and e are inserted by the add operation itself.
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
foreignersSplitByInsertion := [][]*Datahistoryjob{
{&b, &c},
{&d, &e},
}
// First pass (i == 0) updates existing rows; second pass (i == 1) inserts.
for i, x := range foreignersSplitByInsertion {
err = a.AddExchangeNameDatahistoryjobs(ctx, tx, i != 0, x...)
if err != nil {
t.Fatal(err)
}
first := x[0]
second := x[1]
if a.ID != first.ExchangeNameID {
t.Error("foreign key was wrong value", a.ID, first.ExchangeNameID)
}
if a.ID != second.ExchangeNameID {
t.Error("foreign key was wrong value", a.ID, second.ExchangeNameID)
}
if first.R.ExchangeName != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if second.R.ExchangeName != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if a.R.ExchangeNameDatahistoryjobs[i*2] != first {
t.Error("relationship struct slice not set to correct value")
}
if a.R.ExchangeNameDatahistoryjobs[i*2+1] != second {
t.Error("relationship struct slice not set to correct value")
}
// Row count should grow by two per pass.
count, err := a.ExchangeNameDatahistoryjobs().Count(ctx, tx)
if err != nil {
t.Fatal(err)
}
if want := int64((i + 1) * 2); count != want {
t.Error("want", want, "got", count)
}
}
}
func testExchangeToManyAddOpExchangeNameTrades(t *testing.T) {
var err error

View File

@@ -50,29 +50,6 @@ var ScriptExecutionColumns = struct {
// Generated where
type whereHelpernull_String struct{ field string }
func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, true, x)
}
func (w whereHelpernull_String) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
func (w whereHelpernull_String) LT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_String) GT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GTE, x)
}
var ScriptExecutionWhere = struct {
ID whereHelperstring
ScriptID whereHelpernull_String

View File

@@ -14,6 +14,8 @@ import "testing"
func TestParent(t *testing.T) {
t.Run("AuditEvents", testAuditEvents)
t.Run("Candles", testCandles)
t.Run("Datahistoryjobs", testDatahistoryjobs)
t.Run("Datahistoryjobresults", testDatahistoryjobresults)
t.Run("Exchanges", testExchanges)
t.Run("Scripts", testScripts)
t.Run("ScriptExecutions", testScriptExecutions)
@@ -26,6 +28,8 @@ func TestParent(t *testing.T) {
func TestDelete(t *testing.T) {
t.Run("AuditEvents", testAuditEventsDelete)
t.Run("Candles", testCandlesDelete)
t.Run("Datahistoryjobs", testDatahistoryjobsDelete)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsDelete)
t.Run("Exchanges", testExchangesDelete)
t.Run("Scripts", testScriptsDelete)
t.Run("ScriptExecutions", testScriptExecutionsDelete)
@@ -38,6 +42,8 @@ func TestDelete(t *testing.T) {
func TestQueryDeleteAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsQueryDeleteAll)
t.Run("Candles", testCandlesQueryDeleteAll)
t.Run("Datahistoryjobs", testDatahistoryjobsQueryDeleteAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsQueryDeleteAll)
t.Run("Exchanges", testExchangesQueryDeleteAll)
t.Run("Scripts", testScriptsQueryDeleteAll)
t.Run("ScriptExecutions", testScriptExecutionsQueryDeleteAll)
@@ -50,6 +56,8 @@ func TestQueryDeleteAll(t *testing.T) {
func TestSliceDeleteAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsSliceDeleteAll)
t.Run("Candles", testCandlesSliceDeleteAll)
t.Run("Datahistoryjobs", testDatahistoryjobsSliceDeleteAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsSliceDeleteAll)
t.Run("Exchanges", testExchangesSliceDeleteAll)
t.Run("Scripts", testScriptsSliceDeleteAll)
t.Run("ScriptExecutions", testScriptExecutionsSliceDeleteAll)
@@ -62,6 +70,8 @@ func TestSliceDeleteAll(t *testing.T) {
func TestExists(t *testing.T) {
t.Run("AuditEvents", testAuditEventsExists)
t.Run("Candles", testCandlesExists)
t.Run("Datahistoryjobs", testDatahistoryjobsExists)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsExists)
t.Run("Exchanges", testExchangesExists)
t.Run("Scripts", testScriptsExists)
t.Run("ScriptExecutions", testScriptExecutionsExists)
@@ -74,6 +84,8 @@ func TestExists(t *testing.T) {
func TestFind(t *testing.T) {
t.Run("AuditEvents", testAuditEventsFind)
t.Run("Candles", testCandlesFind)
t.Run("Datahistoryjobs", testDatahistoryjobsFind)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsFind)
t.Run("Exchanges", testExchangesFind)
t.Run("Scripts", testScriptsFind)
t.Run("ScriptExecutions", testScriptExecutionsFind)
@@ -86,6 +98,8 @@ func TestFind(t *testing.T) {
func TestBind(t *testing.T) {
t.Run("AuditEvents", testAuditEventsBind)
t.Run("Candles", testCandlesBind)
t.Run("Datahistoryjobs", testDatahistoryjobsBind)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsBind)
t.Run("Exchanges", testExchangesBind)
t.Run("Scripts", testScriptsBind)
t.Run("ScriptExecutions", testScriptExecutionsBind)
@@ -98,6 +112,8 @@ func TestBind(t *testing.T) {
func TestOne(t *testing.T) {
t.Run("AuditEvents", testAuditEventsOne)
t.Run("Candles", testCandlesOne)
t.Run("Datahistoryjobs", testDatahistoryjobsOne)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsOne)
t.Run("Exchanges", testExchangesOne)
t.Run("Scripts", testScriptsOne)
t.Run("ScriptExecutions", testScriptExecutionsOne)
@@ -110,6 +126,8 @@ func TestOne(t *testing.T) {
func TestAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsAll)
t.Run("Candles", testCandlesAll)
t.Run("Datahistoryjobs", testDatahistoryjobsAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsAll)
t.Run("Exchanges", testExchangesAll)
t.Run("Scripts", testScriptsAll)
t.Run("ScriptExecutions", testScriptExecutionsAll)
@@ -122,6 +140,8 @@ func TestAll(t *testing.T) {
func TestCount(t *testing.T) {
t.Run("AuditEvents", testAuditEventsCount)
t.Run("Candles", testCandlesCount)
t.Run("Datahistoryjobs", testDatahistoryjobsCount)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsCount)
t.Run("Exchanges", testExchangesCount)
t.Run("Scripts", testScriptsCount)
t.Run("ScriptExecutions", testScriptExecutionsCount)
@@ -134,6 +154,8 @@ func TestCount(t *testing.T) {
func TestHooks(t *testing.T) {
t.Run("AuditEvents", testAuditEventsHooks)
t.Run("Candles", testCandlesHooks)
t.Run("Datahistoryjobs", testDatahistoryjobsHooks)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsHooks)
t.Run("Exchanges", testExchangesHooks)
t.Run("Scripts", testScriptsHooks)
t.Run("ScriptExecutions", testScriptExecutionsHooks)
@@ -148,6 +170,10 @@ func TestInsert(t *testing.T) {
t.Run("AuditEvents", testAuditEventsInsertWhitelist)
t.Run("Candles", testCandlesInsert)
t.Run("Candles", testCandlesInsertWhitelist)
t.Run("Datahistoryjobs", testDatahistoryjobsInsert)
t.Run("Datahistoryjobs", testDatahistoryjobsInsertWhitelist)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsInsert)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsInsertWhitelist)
t.Run("Exchanges", testExchangesInsert)
t.Run("Exchanges", testExchangesInsertWhitelist)
t.Run("Scripts", testScriptsInsert)
@@ -168,6 +194,8 @@ func TestInsert(t *testing.T) {
// or deadlocks can occur.
func TestToOne(t *testing.T) {
t.Run("CandleToExchangeUsingExchangeName", testCandleToOneExchangeUsingExchangeName)
t.Run("DatahistoryjobToExchangeUsingExchangeName", testDatahistoryjobToOneExchangeUsingExchangeName)
t.Run("DatahistoryjobresultToDatahistoryjobUsingJob", testDatahistoryjobresultToOneDatahistoryjobUsingJob)
t.Run("ScriptExecutionToScriptUsingScript", testScriptExecutionToOneScriptUsingScript)
t.Run("TradeToExchangeUsingExchangeName", testTradeToOneExchangeUsingExchangeName)
t.Run("WithdrawalCryptoToWithdrawalHistoryUsingWithdrawalHistory", testWithdrawalCryptoToOneWithdrawalHistoryUsingWithdrawalHistory)
@@ -185,6 +213,8 @@ func TestOneToOne(t *testing.T) {
// TestToMany tests cannot be run in parallel
// or deadlocks can occur.
func TestToMany(t *testing.T) {
t.Run("DatahistoryjobToJobDatahistoryjobresults", testDatahistoryjobToManyJobDatahistoryjobresults)
t.Run("ExchangeToExchangeNameDatahistoryjobs", testExchangeToManyExchangeNameDatahistoryjobs)
t.Run("ExchangeToExchangeNameWithdrawalHistories", testExchangeToManyExchangeNameWithdrawalHistories)
t.Run("ScriptToScriptExecutions", testScriptToManyScriptExecutions)
t.Run("WithdrawalHistoryToWithdrawalCryptos", testWithdrawalHistoryToManyWithdrawalCryptos)
@@ -195,6 +225,8 @@ func TestToMany(t *testing.T) {
// or deadlocks can occur.
func TestToOneSet(t *testing.T) {
t.Run("CandleToExchangeUsingExchangeNameCandle", testCandleToOneSetOpExchangeUsingExchangeName)
t.Run("DatahistoryjobToExchangeUsingExchangeNameDatahistoryjobs", testDatahistoryjobToOneSetOpExchangeUsingExchangeName)
t.Run("DatahistoryjobresultToDatahistoryjobUsingJobDatahistoryjobresults", testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob)
t.Run("ScriptExecutionToScriptUsingScriptExecutions", testScriptExecutionToOneSetOpScriptUsingScript)
t.Run("TradeToExchangeUsingExchangeNameTrade", testTradeToOneSetOpExchangeUsingExchangeName)
t.Run("WithdrawalCryptoToWithdrawalHistoryUsingWithdrawalCryptos", testWithdrawalCryptoToOneSetOpWithdrawalHistoryUsingWithdrawalHistory)
@@ -220,6 +252,8 @@ func TestOneToOneRemove(t *testing.T) {}
// TestToManyAdd tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyAdd(t *testing.T) {
t.Run("DatahistoryjobToJobDatahistoryjobresults", testDatahistoryjobToManyAddOpJobDatahistoryjobresults)
t.Run("ExchangeToExchangeNameDatahistoryjobs", testExchangeToManyAddOpExchangeNameDatahistoryjobs)
t.Run("ExchangeToExchangeNameWithdrawalHistories", testExchangeToManyAddOpExchangeNameWithdrawalHistories)
t.Run("ScriptToScriptExecutions", testScriptToManyAddOpScriptExecutions)
t.Run("WithdrawalHistoryToWithdrawalCryptos", testWithdrawalHistoryToManyAddOpWithdrawalCryptos)
@@ -237,6 +271,8 @@ func TestToManyRemove(t *testing.T) {}
func TestReload(t *testing.T) {
t.Run("AuditEvents", testAuditEventsReload)
t.Run("Candles", testCandlesReload)
t.Run("Datahistoryjobs", testDatahistoryjobsReload)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsReload)
t.Run("Exchanges", testExchangesReload)
t.Run("Scripts", testScriptsReload)
t.Run("ScriptExecutions", testScriptExecutionsReload)
@@ -249,6 +285,8 @@ func TestReload(t *testing.T) {
func TestReloadAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsReloadAll)
t.Run("Candles", testCandlesReloadAll)
t.Run("Datahistoryjobs", testDatahistoryjobsReloadAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsReloadAll)
t.Run("Exchanges", testExchangesReloadAll)
t.Run("Scripts", testScriptsReloadAll)
t.Run("ScriptExecutions", testScriptExecutionsReloadAll)
@@ -261,6 +299,8 @@ func TestReloadAll(t *testing.T) {
func TestSelect(t *testing.T) {
t.Run("AuditEvents", testAuditEventsSelect)
t.Run("Candles", testCandlesSelect)
t.Run("Datahistoryjobs", testDatahistoryjobsSelect)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsSelect)
t.Run("Exchanges", testExchangesSelect)
t.Run("Scripts", testScriptsSelect)
t.Run("ScriptExecutions", testScriptExecutionsSelect)
@@ -273,6 +313,8 @@ func TestSelect(t *testing.T) {
func TestUpdate(t *testing.T) {
t.Run("AuditEvents", testAuditEventsUpdate)
t.Run("Candles", testCandlesUpdate)
t.Run("Datahistoryjobs", testDatahistoryjobsUpdate)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsUpdate)
t.Run("Exchanges", testExchangesUpdate)
t.Run("Scripts", testScriptsUpdate)
t.Run("ScriptExecutions", testScriptExecutionsUpdate)
@@ -285,6 +327,8 @@ func TestUpdate(t *testing.T) {
func TestSliceUpdateAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsSliceUpdateAll)
t.Run("Candles", testCandlesSliceUpdateAll)
t.Run("Datahistoryjobs", testDatahistoryjobsSliceUpdateAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsSliceUpdateAll)
t.Run("Exchanges", testExchangesSliceUpdateAll)
t.Run("Scripts", testScriptsSliceUpdateAll)
t.Run("ScriptExecutions", testScriptExecutionsSliceUpdateAll)

View File

@@ -4,25 +4,29 @@
package sqlite3
var TableNames = struct {
AuditEvent string
Candle string
Exchange string
GooseDBVersion string
Script string
ScriptExecution string
Trade string
WithdrawalCrypto string
WithdrawalFiat string
WithdrawalHistory string
AuditEvent string
Candle string
Datahistoryjob string
Datahistoryjobresult string
Exchange string
GooseDBVersion string
Script string
ScriptExecution string
Trade string
WithdrawalCrypto string
WithdrawalFiat string
WithdrawalHistory string
}{
AuditEvent: "audit_event",
Candle: "candle",
Exchange: "exchange",
GooseDBVersion: "goose_db_version",
Script: "script",
ScriptExecution: "script_execution",
Trade: "trade",
WithdrawalCrypto: "withdrawal_crypto",
WithdrawalFiat: "withdrawal_fiat",
WithdrawalHistory: "withdrawal_history",
AuditEvent: "audit_event",
Candle: "candle",
Datahistoryjob: "datahistoryjob",
Datahistoryjobresult: "datahistoryjobresult",
Exchange: "exchange",
GooseDBVersion: "goose_db_version",
Script: "script",
ScriptExecution: "script_execution",
Trade: "trade",
WithdrawalCrypto: "withdrawal_crypto",
WithdrawalFiat: "withdrawal_fiat",
WithdrawalHistory: "withdrawal_history",
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,946 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
// testDatahistoryjobs confirms the base query constructor returns a usable
// (non-nil) query object.
func testDatahistoryjobs(t *testing.T) {
	t.Parallel()
	q := Datahistoryjobs()
	if q.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
// testDatahistoryjobsDelete inserts a row, deletes it, and verifies the
// table ends up empty.
func testDatahistoryjobsDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	job := &Datahistoryjob{}
	if err := randomize.Struct(seed, job, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := job.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := job.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testDatahistoryjobsQueryDeleteAll inserts one row, issues a query-level
// DeleteAll, and verifies exactly one row was removed and none remain.
func testDatahistoryjobsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
// Unfiltered query delete must report a single affected row.
if rowsAff, err := Datahistoryjobs().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobsSliceDeleteAll inserts one row, deletes via the slice
// helper, and verifies exactly one row was removed and none remain.
func testDatahistoryjobsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
// Slice-level delete must report a single affected row.
slice := DatahistoryjobSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobsExists inserts a row and verifies DatahistoryjobExists
// reports it as present.
func testDatahistoryjobsExists(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjob{}
	if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	e, err := DatahistoryjobExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if Datahistoryjob exists: %s", err)
	}
	if !e {
		// t.Error (not t.Errorf): the message is a constant with no format verbs.
		t.Error("Expected DatahistoryjobExists to return true, but got false.")
	}
}
// testDatahistoryjobsFind inserts a row and confirms FindDatahistoryjob
// retrieves it by primary key.
func testDatahistoryjobsFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	job := &Datahistoryjob{}
	if err := randomize.Struct(seed, job, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	if err := job.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	found, err := FindDatahistoryjob(ctx, tx, job.ID)
	if err != nil {
		t.Error(err)
	}
	if found == nil {
		t.Error("want a record, got nil")
	}
}
// testDatahistoryjobsBind inserts a row and checks that the query Bind helper
// can scan a result back into a Datahistoryjob struct without error.
func testDatahistoryjobsBind(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err := Datahistoryjobs().Bind(ctx, tx, row); err != nil {
		t.Error(err)
	}
}
// testDatahistoryjobsOne inserts a row and verifies the query One helper
// returns a non-nil record.
func testDatahistoryjobsOne(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	got, err := Datahistoryjobs().One(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if got == nil {
		t.Error("expected to get a non nil record")
	}
}
// testDatahistoryjobsAll inserts two randomized rows and verifies the query
// All helper returns both of them.
func testDatahistoryjobsAll(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	first := &Datahistoryjob{}
	second := &Datahistoryjob{}
	for _, row := range []*Datahistoryjob{first, second} {
		if err := randomize.Struct(seed, row, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
		}
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	for _, row := range []*Datahistoryjob{first, second} {
		if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}
	slice, err := Datahistoryjobs().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
// testDatahistoryjobsCount inserts two randomized rows and verifies the query
// Count helper reports exactly two records.
func testDatahistoryjobsCount(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	first := &Datahistoryjob{}
	second := &Datahistoryjob{}
	for _, row := range []*Datahistoryjob{first, second} {
		if err := randomize.Struct(seed, row, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
		}
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	for _, row := range []*Datahistoryjob{first, second} {
		if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}
	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// datahistoryjobBeforeInsertHook resets the object to its zero value so
// testDatahistoryjobsHooks can detect that the hook executed. The context and
// executor arguments are unused by these test stubs.
func datahistoryjobBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobAfterInsertHook zeroes the object to signal hook execution.
func datahistoryjobAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobAfterSelectHook zeroes the object to signal hook execution.
func datahistoryjobAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobBeforeUpdateHook zeroes the object to signal hook execution.
func datahistoryjobBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobAfterUpdateHook zeroes the object to signal hook execution.
func datahistoryjobAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobBeforeDeleteHook zeroes the object to signal hook execution.
func datahistoryjobBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobAfterDeleteHook zeroes the object to signal hook execution.
func datahistoryjobAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobBeforeUpsertHook zeroes the object to signal hook execution.
func datahistoryjobBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

// datahistoryjobAfterUpsertHook zeroes the object to signal hook execution.
func datahistoryjobAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}
// testDatahistoryjobsHooks registers each hook point in turn, fires it
// directly through the generated do*Hooks methods, and verifies the hook ran
// by checking that it reset the object to its zero value. After each stage the
// corresponding package-level hook slice is cleared so stages stay
// independent.
func testDatahistoryjobsHooks(t *testing.T) {
	t.Parallel()
	var err error
	ctx := context.Background()
	empty := &Datahistoryjob{}
	o := &Datahistoryjob{}
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, datahistoryjobDBTypes, false); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob object: %s", err)
	}
	AddDatahistoryjobHook(boil.BeforeInsertHook, datahistoryjobBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	// Clear the global registry so the next stage starts with no hooks.
	datahistoryjobBeforeInsertHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.AfterInsertHook, datahistoryjobAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterInsertHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.AfterSelectHook, datahistoryjobAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterSelectHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.BeforeUpdateHook, datahistoryjobBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobBeforeUpdateHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.AfterUpdateHook, datahistoryjobAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterUpdateHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.BeforeDeleteHook, datahistoryjobBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobBeforeDeleteHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.AfterDeleteHook, datahistoryjobAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterDeleteHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.BeforeUpsertHook, datahistoryjobBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobBeforeUpsertHooks = []DatahistoryjobHook{}
	AddDatahistoryjobHook(boil.AfterUpsertHook, datahistoryjobAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterUpsertHooks = []DatahistoryjobHook{}
}
// testDatahistoryjobsInsert inserts a randomized row with inferred columns
// and verifies exactly one record exists afterwards.
func testDatahistoryjobsInsert(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testDatahistoryjobsInsertWhitelist inserts a randomized row using an
// explicit column whitelist (all non-default columns) and verifies exactly
// one record exists afterwards.
func testDatahistoryjobsInsertWhitelist(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Whitelist(datahistoryjobColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
// testDatahistoryjobToManyJobDatahistoryjobresults verifies the one-to-many
// relationship between a Datahistoryjob and its Datahistoryjobresult rows:
// the relationship query returns the child rows, and both the slice-based and
// singular eager loaders populate a.R.JobDatahistoryjobresults.
// Note: not parallel — it relies on exact row counts within its transaction.
func testDatahistoryjobToManyJobDatahistoryjobresults(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjob
	var b, c Datahistoryjobresult
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}
	// Point both children at the parent before inserting them.
	b.JobID = a.ID
	c.JobID = a.ID
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	check, err := a.JobDatahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}
	// Match on the foreign key; both children share a.ID as JobID.
	bFound, cFound := false, false
	for _, v := range check {
		if v.JobID == b.JobID {
			bFound = true
		}
		if v.JobID == c.JobID {
			cFound = true
		}
	}
	if !bFound {
		t.Error("expected to find b")
	}
	if !cFound {
		t.Error("expected to find c")
	}
	// Eager load via the slice loader (non-singular path).
	slice := DatahistoryjobSlice{&a}
	if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.JobDatahistoryjobresults); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}
	// Reset and eager load again via the singular path.
	a.R.JobDatahistoryjobresults = nil
	if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, true, &a, nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.JobDatahistoryjobresults); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}
	if t.Failed() {
		t.Logf("%#v", check)
	}
}
// testDatahistoryjobToManyAddOpJobDatahistoryjobresults exercises the
// generated AddJobDatahistoryjobresults helper in both modes: appending
// already-inserted children (b, c) and inserting new children (d, e). It
// checks that foreign keys, the forward and reverse relationship structs, and
// the running row count are all maintained.
func testDatahistoryjobToManyAddOpJobDatahistoryjobresults(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjob
	var b, c, d, e Datahistoryjobresult
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	foreigners := []*Datahistoryjobresult{&b, &c, &d, &e}
	for _, x := range foreigners {
		if err = randomize.Struct(seed, x, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
			t.Fatal(err)
		}
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// b and c are pre-inserted; d and e are inserted by the Add helper itself.
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	foreignersSplitByInsertion := [][]*Datahistoryjobresult{
		{&b, &c},
		{&d, &e},
	}
	for i, x := range foreignersSplitByInsertion {
		// i != 0 selects the "insert" path for the second batch.
		err = a.AddJobDatahistoryjobresults(ctx, tx, i != 0, x...)
		if err != nil {
			t.Fatal(err)
		}
		first := x[0]
		second := x[1]
		if a.ID != first.JobID {
			t.Error("foreign key was wrong value", a.ID, first.JobID)
		}
		if a.ID != second.JobID {
			t.Error("foreign key was wrong value", a.ID, second.JobID)
		}
		if first.R.Job != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}
		if second.R.Job != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}
		if a.R.JobDatahistoryjobresults[i*2] != first {
			t.Error("relationship struct slice not set to correct value")
		}
		if a.R.JobDatahistoryjobresults[i*2+1] != second {
			t.Error("relationship struct slice not set to correct value")
		}
		count, err := a.JobDatahistoryjobresults().Count(ctx, tx)
		if err != nil {
			t.Fatal(err)
		}
		// Two children are attached per iteration.
		if want := int64((i + 1) * 2); count != want {
			t.Error("want", want, "got", count)
		}
	}
}
// testDatahistoryjobToOneExchangeUsingExchangeName verifies the to-one
// relationship from Datahistoryjob to Exchange via the exchange_name_id
// foreign key: the relationship query resolves the parent, and both the
// slice-based and singular eager loaders populate local.R.ExchangeName.
func testDatahistoryjobToOneExchangeUsingExchangeName(t *testing.T) {
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var local Datahistoryjob
	var foreign Exchange
	seed := randomize.NewSeed()
	if err := randomize.Struct(seed, &local, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	if err := randomize.Struct(seed, &foreign, exchangeDBTypes, false, exchangeColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Exchange struct: %s", err)
	}
	// Insert the parent first so the FK on local is valid.
	if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	local.ExchangeNameID = foreign.ID
	if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	check, err := local.ExchangeName().One(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}
	if check.ID != foreign.ID {
		t.Errorf("want: %v, got %v", foreign.ID, check.ID)
	}
	// Eager load via the slice loader (non-singular path).
	slice := DatahistoryjobSlice{&local}
	if err = local.L.LoadExchangeName(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if local.R.ExchangeName == nil {
		t.Error("struct should have been eager loaded")
	}
	// Reset and eager load again via the singular path.
	local.R.ExchangeName = nil
	if err = local.L.LoadExchangeName(ctx, tx, true, &local, nil); err != nil {
		t.Fatal(err)
	}
	if local.R.ExchangeName == nil {
		t.Error("struct should have been eager loaded")
	}
}
// testDatahistoryjobToOneSetOpExchangeUsingExchangeName exercises the
// generated SetExchangeName helper twice: once with an already-inserted
// Exchange (b) and once inserting the Exchange as part of the call (c). After
// each set it zeroes the in-memory foreign key via reflection and reloads the
// row to prove the FK was actually persisted.
func testDatahistoryjobToOneSetOpExchangeUsingExchangeName(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjob
	var b, c Exchange
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// b is pre-inserted; c is inserted by SetExchangeName (i != 0 below).
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	for i, x := range []*Exchange{&b, &c} {
		err = a.SetExchangeName(ctx, tx, i != 0, x)
		if err != nil {
			t.Fatal(err)
		}
		if a.R.ExchangeName != x {
			t.Error("relationship struct not set to correct value")
		}
		if x.R.ExchangeNameDatahistoryjobs[0] != &a {
			t.Error("failed to append to foreign relationship struct")
		}
		if a.ExchangeNameID != x.ID {
			t.Error("foreign key was wrong value", a.ExchangeNameID)
		}
		// Zero the FK in memory, then reload from the database to confirm the
		// update was written.
		zero := reflect.Zero(reflect.TypeOf(a.ExchangeNameID))
		reflect.Indirect(reflect.ValueOf(&a.ExchangeNameID)).Set(zero)
		if err = a.Reload(ctx, tx); err != nil {
			t.Fatal("failed to reload", err)
		}
		if a.ExchangeNameID != x.ID {
			t.Error("foreign key was wrong value", a.ExchangeNameID, x.ID)
		}
	}
}
// testDatahistoryjobsReload inserts a randomized row and verifies Reload
// refreshes it from the database without error.
func testDatahistoryjobsReload(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err := row.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testDatahistoryjobsReloadAll inserts a randomized row and verifies the
// slice ReloadAll helper refreshes it from the database without error.
func testDatahistoryjobsReloadAll(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	rows := DatahistoryjobSlice{row}
	if err := rows.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testDatahistoryjobsSelect inserts a randomized row and verifies a plain
// select via All returns exactly one record.
func testDatahistoryjobsSelect(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	rows, err := Datahistoryjobs().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(rows) != 1 {
		t.Error("want one record, got:", len(rows))
	}
}
var (
	// datahistoryjobDBTypes maps struct field names to their database column
	// types; randomize.Struct uses it to generate valid test fixtures.
	datahistoryjobDBTypes = map[string]string{`ID`: `TEXT`, `Nickname`: `TEXT`, `ExchangeNameID`: `TEXT`, `Asset`: `TEXT`, `Base`: `TEXT`, `Quote`: `TEXT`, `StartTime`: `TIMESTAMP`, `EndTime`: `TIMESTAMP`, `Interval`: `REAL`, `DataType`: `REAL`, `RequestSize`: `REAL`, `MaxRetries`: `REAL`, `BatchCount`: `REAL`, `Status`: `REAL`, `Created`: `TIMESTAMP`}
	// Keep the bytes import referenced even if nothing else in the generated
	// file uses it.
	_ = bytes.MinRead
)
// testDatahistoryjobsUpdate inserts a randomized row, re-randomizes its
// non-key columns and verifies Update affects exactly one record.
func testDatahistoryjobsUpdate(t *testing.T) {
	t.Parallel()
	if len(datahistoryjobPrimaryKeyColumns) == 0 {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	row := &Datahistoryjob{}
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := row.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Randomize everything except the primary key so the update writes new
	// values to the same row.
	if err := randomize.Struct(seed, row, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	rowsAff, err := row.Update(ctx, tx, boil.Infer())
	if err != nil {
		t.Error(err)
	}
	if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testDatahistoryjobsSliceUpdateAll inserts a row, re-randomizes its non-key
// fields, then builds an update map of non-primary-key columns via reflection
// and verifies the slice UpdateAll helper affects exactly one record.
func testDatahistoryjobsSliceUpdateAll(t *testing.T) {
	t.Parallel()
	if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjob{}
	if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Randomize everything except the primary key.
	if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(datahistoryjobAllColumns, datahistoryjobPrimaryKeyColumns) {
		fields = datahistoryjobAllColumns
	} else {
		fields = strmangle.SetComplement(
			datahistoryjobAllColumns,
			datahistoryjobPrimaryKeyColumns,
		)
	}
	// Map each updatable column to its current struct value by matching the
	// field's `boil` tag against the column name.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()
	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}
	slice := DatahistoryjobSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}

---- View File (new file; diff hunk header: @@ -0,0 +1,980 @@) ----
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"context"
"database/sql"
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/queries/qm"
"github.com/thrasher-corp/sqlboiler/queries/qmhelper"
"github.com/thrasher-corp/sqlboiler/strmangle"
"github.com/volatiletech/null"
)
// Datahistoryjobresult is an object representing the database table.
// Time-like columns (interval_start_time, interval_end_time, run_time) are
// carried as strings here — presumably because SQLite stores them as TEXT;
// confirm against the migration schema.
type Datahistoryjobresult struct {
	ID string `boil:"id" json:"id" toml:"id" yaml:"id"`
	JobID string `boil:"job_id" json:"job_id" toml:"job_id" yaml:"job_id"`
	Result null.String `boil:"result" json:"result,omitempty" toml:"result" yaml:"result,omitempty"`
	Status float64 `boil:"status" json:"status" toml:"status" yaml:"status"`
	IntervalStartTime string `boil:"interval_start_time" json:"interval_start_time" toml:"interval_start_time" yaml:"interval_start_time"`
	IntervalEndTime string `boil:"interval_end_time" json:"interval_end_time" toml:"interval_end_time" yaml:"interval_end_time"`
	RunTime string `boil:"run_time" json:"run_time" toml:"run_time" yaml:"run_time"`
	// R holds eager-loaded relationships; L holds the Load methods.
	R *datahistoryjobresultR `boil:"-" json:"-" toml:"-" yaml:"-"`
	L datahistoryjobresultL `boil:"-" json:"-" toml:"-" yaml:"-"`
}

// DatahistoryjobresultColumns maps the struct field names to their database
// column names.
var DatahistoryjobresultColumns = struct {
	ID string
	JobID string
	Result string
	Status string
	IntervalStartTime string
	IntervalEndTime string
	RunTime string
}{
	ID: "id",
	JobID: "job_id",
	Result: "result",
	Status: "status",
	IntervalStartTime: "interval_start_time",
	IntervalEndTime: "interval_end_time",
	RunTime: "run_time",
}
// Generated where

// whereHelpernull_String builds typed WHERE-clause query mods for a nullable
// string column; EQ/NEQ are NULL-aware via qmhelper.WhereNullEQ.
type whereHelpernull_String struct{ field string }

func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, true, x)
}

// IsNull and IsNotNull test the column's nullity directly.
func (w whereHelpernull_String) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
func (w whereHelpernull_String) LT(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_String) GT(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GTE, x)
}
// DatahistoryjobresultWhere provides typed where-clause helpers for each
// column, pre-bound to the fully qualified column name.
var DatahistoryjobresultWhere = struct {
	ID whereHelperstring
	JobID whereHelperstring
	Result whereHelpernull_String
	Status whereHelperfloat64
	IntervalStartTime whereHelperstring
	IntervalEndTime whereHelperstring
	RunTime whereHelperstring
}{
	ID: whereHelperstring{field: "\"datahistoryjobresult\".\"id\""},
	JobID: whereHelperstring{field: "\"datahistoryjobresult\".\"job_id\""},
	Result: whereHelpernull_String{field: "\"datahistoryjobresult\".\"result\""},
	Status: whereHelperfloat64{field: "\"datahistoryjobresult\".\"status\""},
	IntervalStartTime: whereHelperstring{field: "\"datahistoryjobresult\".\"interval_start_time\""},
	IntervalEndTime: whereHelperstring{field: "\"datahistoryjobresult\".\"interval_end_time\""},
	RunTime: whereHelperstring{field: "\"datahistoryjobresult\".\"run_time\""},
}

// DatahistoryjobresultRels is where relationship names are stored.
var DatahistoryjobresultRels = struct {
	Job string
}{
	Job: "Job",
}

// datahistoryjobresultR is where relationships are stored.
type datahistoryjobresultR struct {
	Job *Datahistoryjob
}

// NewStruct creates a new relationship struct
func (*datahistoryjobresultR) NewStruct() *datahistoryjobresultR {
	return &datahistoryjobresultR{}
}

// datahistoryjobresultL is where Load methods for each relationship are stored.
type datahistoryjobresultL struct{}

var (
	// Column name groupings used by the generated insert/update logic: all
	// columns, those without database defaults, those with defaults, and the
	// primary key.
	datahistoryjobresultAllColumns = []string{"id", "job_id", "result", "status", "interval_start_time", "interval_end_time", "run_time"}
	datahistoryjobresultColumnsWithoutDefault = []string{"id", "job_id", "result", "status", "interval_start_time", "interval_end_time"}
	datahistoryjobresultColumnsWithDefault = []string{"run_time"}
	datahistoryjobresultPrimaryKeyColumns = []string{"id"}
)
type (
	// DatahistoryjobresultSlice is an alias for a slice of pointers to Datahistoryjobresult.
	// This should generally be used opposed to []Datahistoryjobresult.
	DatahistoryjobresultSlice []*Datahistoryjobresult
	// DatahistoryjobresultHook is the signature for custom Datahistoryjobresult hook methods
	DatahistoryjobresultHook func(context.Context, boil.ContextExecutor, *Datahistoryjobresult) error
	// datahistoryjobresultQuery wraps a queries.Query so the generated
	// finishers (One, All, Count, Exists, ...) can hang off it.
	datahistoryjobresultQuery struct {
		*queries.Query
	}
)

// Cache for insert, update and upsert
var (
	datahistoryjobresultType = reflect.TypeOf(&Datahistoryjobresult{})
	datahistoryjobresultMapping = queries.MakeStructMapping(datahistoryjobresultType)
	datahistoryjobresultPrimaryKeyMapping, _ = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, datahistoryjobresultPrimaryKeyColumns)
	// Statement caches keyed by the column set, guarded by their mutexes.
	datahistoryjobresultInsertCacheMut sync.RWMutex
	datahistoryjobresultInsertCache = make(map[string]insertCache)
	datahistoryjobresultUpdateCacheMut sync.RWMutex
	datahistoryjobresultUpdateCache = make(map[string]updateCache)
	datahistoryjobresultUpsertCacheMut sync.RWMutex
	datahistoryjobresultUpsertCache = make(map[string]insertCache)
)

var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)

// Package-level hook registries, appended to by AddDatahistoryjobresultHook.
var datahistoryjobresultBeforeInsertHooks []DatahistoryjobresultHook
var datahistoryjobresultBeforeUpdateHooks []DatahistoryjobresultHook
var datahistoryjobresultBeforeDeleteHooks []DatahistoryjobresultHook
var datahistoryjobresultBeforeUpsertHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterInsertHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterSelectHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterUpdateHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterDeleteHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterUpsertHooks []DatahistoryjobresultHook
// doBeforeInsertHooks executes all "before insert" hooks. All of the do*Hooks
// methods below short-circuit to nil when hooks are skipped via the context
// (boil.HooksAreSkipped), and stop at the first hook that returns an error.
func (o *Datahistoryjobresult) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultBeforeInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *Datahistoryjobresult) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultBeforeUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *Datahistoryjobresult) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultBeforeDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *Datahistoryjobresult) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultBeforeUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doAfterInsertHooks executes all "after Insert" hooks.
func (o *Datahistoryjobresult) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultAfterInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doAfterSelectHooks executes all "after Select" hooks.
func (o *Datahistoryjobresult) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultAfterSelectHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doAfterUpdateHooks executes all "after Update" hooks.
func (o *Datahistoryjobresult) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultAfterUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *Datahistoryjobresult) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultAfterDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}

// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *Datahistoryjobresult) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, hook := range datahistoryjobresultAfterUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// AddDatahistoryjobresultHook registers your hook function for all future operations.
// It appends to package-level slices without locking, so registration should
// happen before concurrent use of the model.
func AddDatahistoryjobresultHook(hookPoint boil.HookPoint, datahistoryjobresultHook DatahistoryjobresultHook) {
	switch hookPoint {
	case boil.BeforeInsertHook:
		datahistoryjobresultBeforeInsertHooks = append(datahistoryjobresultBeforeInsertHooks, datahistoryjobresultHook)
	case boil.BeforeUpdateHook:
		datahistoryjobresultBeforeUpdateHooks = append(datahistoryjobresultBeforeUpdateHooks, datahistoryjobresultHook)
	case boil.BeforeDeleteHook:
		datahistoryjobresultBeforeDeleteHooks = append(datahistoryjobresultBeforeDeleteHooks, datahistoryjobresultHook)
	case boil.BeforeUpsertHook:
		datahistoryjobresultBeforeUpsertHooks = append(datahistoryjobresultBeforeUpsertHooks, datahistoryjobresultHook)
	case boil.AfterInsertHook:
		datahistoryjobresultAfterInsertHooks = append(datahistoryjobresultAfterInsertHooks, datahistoryjobresultHook)
	case boil.AfterSelectHook:
		datahistoryjobresultAfterSelectHooks = append(datahistoryjobresultAfterSelectHooks, datahistoryjobresultHook)
	case boil.AfterUpdateHook:
		datahistoryjobresultAfterUpdateHooks = append(datahistoryjobresultAfterUpdateHooks, datahistoryjobresultHook)
	case boil.AfterDeleteHook:
		datahistoryjobresultAfterDeleteHooks = append(datahistoryjobresultAfterDeleteHooks, datahistoryjobresultHook)
	case boil.AfterUpsertHook:
		datahistoryjobresultAfterUpsertHooks = append(datahistoryjobresultAfterUpsertHooks, datahistoryjobresultHook)
	}
}
// One returns a single datahistoryjobresult record from the query.
func (q datahistoryjobresultQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Datahistoryjobresult, error) {
	obj := &Datahistoryjobresult{}

	queries.SetLimit(q.Query, 1)

	if err := q.Bind(ctx, exec, obj); err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "sqlite3: failed to execute a one query for datahistoryjobresult")
	}

	if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
		return obj, err
	}

	return obj, nil
}
// All returns all Datahistoryjobresult records from the query.
func (q datahistoryjobresultQuery) All(ctx context.Context, exec boil.ContextExecutor) (DatahistoryjobresultSlice, error) {
	var results []*Datahistoryjobresult

	if err := q.Bind(ctx, exec, &results); err != nil {
		return nil, errors.Wrap(err, "sqlite3: failed to assign all query results to Datahistoryjobresult slice")
	}

	// Only walk the slice when select hooks are registered.
	if len(datahistoryjobresultAfterSelectHooks) > 0 {
		for _, rec := range results {
			if err := rec.doAfterSelectHooks(ctx, exec); err != nil {
				return results, err
			}
		}
	}

	return results, nil
}
// Count returns the count of all Datahistoryjobresult records in the query.
func (q datahistoryjobresultQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)

	var total int64
	if err := q.Query.QueryRowContext(ctx, exec).Scan(&total); err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to count datahistoryjobresult rows")
	}

	return total, nil
}
// Exists checks if the table contains at least one row matching the query.
func (q datahistoryjobresultQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	queries.SetLimit(q.Query, 1)

	var n int64
	if err := q.Query.QueryRowContext(ctx, exec).Scan(&n); err != nil {
		return false, errors.Wrap(err, "sqlite3: failed to check if datahistoryjobresult exists")
	}

	return n > 0, nil
}
// Job builds a query for the Datahistoryjob pointed to by this record's
// job_id foreign key; extra query mods may be supplied by the caller.
func (o *Datahistoryjobresult) Job(mods ...qm.QueryMod) datahistoryjobQuery {
	mergedMods := append([]qm.QueryMod{qm.Where("\"id\" = ?", o.JobID)}, mods...)

	query := Datahistoryjobs(mergedMods...)
	queries.SetFrom(query.Query, "\"datahistoryjob\"")

	return query
}
// LoadJob allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for an N-1 relationship.
func (datahistoryjobresultL) LoadJob(ctx context.Context, e boil.ContextExecutor, singular bool, maybeDatahistoryjobresult interface{}, mods queries.Applicator) error {
	var slice []*Datahistoryjobresult
	var object *Datahistoryjobresult

	// maybeDatahistoryjobresult is either a single object or a slice of
	// objects, selected by the singular flag.
	if singular {
		object = maybeDatahistoryjobresult.(*Datahistoryjobresult)
	} else {
		slice = *maybeDatahistoryjobresult.(*[]*Datahistoryjobresult)
	}

	// Collect the distinct JobID foreign-key values to query for, lazily
	// creating the relationship struct R on each object along the way.
	args := make([]interface{}, 0, 1)
	if singular {
		if object.R == nil {
			object.R = &datahistoryjobresultR{}
		}
		args = append(args, object.JobID)
	} else {
	Outer:
		for _, obj := range slice {
			if obj.R == nil {
				obj.R = &datahistoryjobresultR{}
			}

			// Skip duplicate foreign keys so each job is fetched only once.
			for _, a := range args {
				if a == obj.JobID {
					continue Outer
				}
			}

			args = append(args, obj.JobID)
		}
	}

	// Nothing to load.
	if len(args) == 0 {
		return nil
	}

	query := NewQuery(qm.From(`datahistoryjob`), qm.WhereIn(`datahistoryjob.id in ?`, args...))
	if mods != nil {
		mods.Apply(query)
	}

	results, err := query.QueryContext(ctx, e)
	if err != nil {
		return errors.Wrap(err, "failed to eager load Datahistoryjob")
	}

	var resultSlice []*Datahistoryjob
	if err = queries.Bind(results, &resultSlice); err != nil {
		return errors.Wrap(err, "failed to bind eager loaded slice Datahistoryjob")
	}

	if err = results.Close(); err != nil {
		return errors.Wrap(err, "failed to close results of eager load for datahistoryjob")
	}
	if err = results.Err(); err != nil {
		return errors.Wrap(err, "error occurred during iteration of eager loaded relations for datahistoryjob")
	}

	// Run after-select hooks on the freshly loaded jobs, if any are registered.
	if len(datahistoryjobresultAfterSelectHooks) != 0 {
		for _, obj := range resultSlice {
			if err := obj.doAfterSelectHooks(ctx, e); err != nil {
				return err
			}
		}
	}

	if len(resultSlice) == 0 {
		return nil
	}

	// Wire up both sides of the relationship in memory: result -> job and
	// job -> results.
	if singular {
		foreign := resultSlice[0]
		object.R.Job = foreign
		if foreign.R == nil {
			foreign.R = &datahistoryjobR{}
		}
		foreign.R.JobDatahistoryjobresults = append(foreign.R.JobDatahistoryjobresults, object)
		return nil
	}

	for _, local := range slice {
		for _, foreign := range resultSlice {
			if local.JobID == foreign.ID {
				local.R.Job = foreign
				if foreign.R == nil {
					foreign.R = &datahistoryjobR{}
				}
				foreign.R.JobDatahistoryjobresults = append(foreign.R.JobDatahistoryjobresults, local)
				break
			}
		}
	}

	return nil
}
// SetJob of the datahistoryjobresult to the related item.
// Sets o.R.Job to related.
// Adds o to related.R.JobDatahistoryjobresults.
func (o *Datahistoryjobresult) SetJob(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Datahistoryjob) error {
	var err error
	// Optionally insert the related job first so its ID exists in the database.
	if insert {
		if err = related.Insert(ctx, exec, boil.Infer()); err != nil {
			return errors.Wrap(err, "failed to insert into foreign table")
		}
	}

	// Point this result's job_id at the related job.
	updateQuery := fmt.Sprintf(
		"UPDATE \"datahistoryjobresult\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, []string{"job_id"}),
		strmangle.WhereClause("\"", "\"", 0, datahistoryjobresultPrimaryKeyColumns),
	)
	values := []interface{}{related.ID, o.ID}

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, updateQuery)
		fmt.Fprintln(boil.DebugWriter, values)
	}
	if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
		return errors.Wrap(err, "failed to update local table")
	}

	// Mirror the change in memory on both sides of the relationship.
	o.JobID = related.ID
	if o.R == nil {
		o.R = &datahistoryjobresultR{
			Job: related,
		}
	} else {
		o.R.Job = related
	}

	if related.R == nil {
		related.R = &datahistoryjobR{
			JobDatahistoryjobresults: DatahistoryjobresultSlice{o},
		}
	} else {
		related.R.JobDatahistoryjobresults = append(related.R.JobDatahistoryjobresults, o)
	}

	return nil
}
// Datahistoryjobresults retrieves all the records using an executor.
func Datahistoryjobresults(mods ...qm.QueryMod) datahistoryjobresultQuery {
	allMods := append(mods, qm.From("\"datahistoryjobresult\""))
	return datahistoryjobresultQuery{NewQuery(allMods...)}
}
// FindDatahistoryjobresult retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindDatahistoryjobresult(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*Datahistoryjobresult, error) {
	columns := "*"
	if len(selectCols) > 0 {
		columns = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}

	obj := &Datahistoryjobresult{}
	query := fmt.Sprintf(
		"select %s from \"datahistoryjobresult\" where \"id\"=?", columns,
	)

	if err := queries.Raw(query, iD).Bind(ctx, exec, obj); err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "sqlite3: unable to select from datahistoryjobresult")
	}

	return obj, nil
}
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Datahistoryjobresult) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("sqlite3: no datahistoryjobresult provided for insertion")
	}

	var err error
	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Columns whose struct value is non-zero but that have database defaults
	// must be included in the insert rather than defaulted.
	nzDefaults := queries.NonZeroDefaultSet(datahistoryjobresultColumnsWithDefault, o)

	// Query text and bind mappings are cached per distinct column set.
	key := makeCacheKey(columns, nzDefaults)
	datahistoryjobresultInsertCacheMut.RLock()
	cache, cached := datahistoryjobresultInsertCache[key]
	datahistoryjobresultInsertCacheMut.RUnlock()

	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			datahistoryjobresultAllColumns,
			datahistoryjobresultColumnsWithDefault,
			datahistoryjobresultColumnsWithoutDefault,
			nzDefaults,
		)

		cache.valueMapping, err = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"datahistoryjobresult\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"datahistoryjobresult\" () VALUES ()%s%s"
		}

		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			// sqlite3 has no RETURNING clause; database-generated defaults
			// are fetched with a follow-up SELECT keyed on the primary key.
			cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"datahistoryjobresult\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, datahistoryjobresultPrimaryKeyColumns))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.query)
		fmt.Fprintln(boil.DebugWriter, vals)
	}

	_, err = exec.ExecContext(ctx, cache.query, vals...)
	if err != nil {
		return errors.Wrap(err, "sqlite3: unable to insert into datahistoryjobresult")
	}

	var identifierCols []interface{}

	// Skip the default-value refetch when nothing needs to be read back.
	if len(cache.retMapping) == 0 {
		goto CacheNoHooks
	}

	identifierCols = []interface{}{
		o.ID,
	}

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.retQuery)
		fmt.Fprintln(boil.DebugWriter, identifierCols...)
	}

	err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	if err != nil {
		return errors.Wrap(err, "sqlite3: unable to populate default values for datahistoryjobresult")
	}

CacheNoHooks:
	// Store the built query in the cache for subsequent inserts.
	if !cached {
		datahistoryjobresultInsertCacheMut.Lock()
		datahistoryjobresultInsertCache[key] = cache
		datahistoryjobresultInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the Datahistoryjobresult.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Datahistoryjobresult) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}

	// Query text and bind mapping are cached per distinct column set.
	key := makeCacheKey(columns, nil)
	datahistoryjobresultUpdateCacheMut.RLock()
	cache, cached := datahistoryjobresultUpdateCache[key]
	datahistoryjobresultUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			datahistoryjobresultAllColumns,
			datahistoryjobresultPrimaryKeyColumns,
		)
		if len(wl) == 0 {
			return 0, errors.New("sqlite3: unable to update datahistoryjobresult, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"datahistoryjobresult\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, datahistoryjobresultPrimaryKeyColumns),
		)
		// Bind order is the SET columns followed by the primary key columns.
		cache.valueMapping, err = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, append(wl, datahistoryjobresultPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.query)
		fmt.Fprintln(boil.DebugWriter, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to update datahistoryjobresult row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by update for datahistoryjobresult")
	}

	if !cached {
		datahistoryjobresultUpdateCacheMut.Lock()
		datahistoryjobresultUpdateCache[key] = cache
		datahistoryjobresultUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
func (q datahistoryjobresultQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)

	res, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to update all for datahistoryjobresult")
	}

	n, err := res.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to retrieve rows affected for datahistoryjobresult")
	}

	return n, nil
}
// UpdateAll updates all rows with the specified column values, using an executor.
func (o DatahistoryjobresultSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("sqlite3: update all requires at least one column argument")
	}

	// SET-clause arguments come first; colNames and args are filled in the
	// same map-iteration order so names and values stay aligned.
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), datahistoryjobresultPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"datahistoryjobresult\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, datahistoryjobresultPrimaryKeyColumns, len(o)))

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		fmt.Fprintln(boil.DebugWriter, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to update all in datahistoryjobresult slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to retrieve rows affected all in update all datahistoryjobresult")
	}

	return rowsAff, nil
}
// Delete deletes a single Datahistoryjobresult record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *Datahistoryjobresult) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("sqlite3: no Datahistoryjobresult provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	// "query" rather than "sql" to avoid shadowing the database/sql package.
	query := "DELETE FROM \"datahistoryjobresult\" WHERE \"id\"=?"
	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), datahistoryjobresultPrimaryKeyMapping)

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, query)
		fmt.Fprintln(boil.DebugWriter, args...)
	}

	res, err := exec.ExecContext(ctx, query, args...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to delete from datahistoryjobresult")
	}

	rowsAff, err := res.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by delete for datahistoryjobresult")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
// DeleteAll deletes all matching rows.
func (q datahistoryjobresultQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("sqlite3: no datahistoryjobresultQuery provided for delete all")
	}

	queries.SetDelete(q.Query)

	res, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to delete all from datahistoryjobresult")
	}

	n, err := res.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by deleteall for datahistoryjobresult")
	}

	return n, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
func (o DatahistoryjobresultSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(datahistoryjobresultBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Collect every row's primary key values for the repeated WHERE clause.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), datahistoryjobresultPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"datahistoryjobresult\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, datahistoryjobresultPrimaryKeyColumns, len(o))

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		fmt.Fprintln(boil.DebugWriter, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: unable to delete all from datahistoryjobresult slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by deleteall for datahistoryjobresult")
	}

	if len(datahistoryjobresultAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *Datahistoryjobresult) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	fresh, err := FindDatahistoryjobresult(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	*o = *fresh
	return nil
}
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *DatahistoryjobresultSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := DatahistoryjobresultSlice{}
	// Collect every row's primary key values for the repeated WHERE clause.
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), datahistoryjobresultPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"datahistoryjobresult\".* FROM \"datahistoryjobresult\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, datahistoryjobresultPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "sqlite3: unable to reload all in DatahistoryjobresultSlice")
	}

	*o = slice
	return nil
}
// DatahistoryjobresultExists checks if the Datahistoryjobresult row exists.
func DatahistoryjobresultExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
	// "query" rather than "sql" to avoid shadowing the database/sql package.
	query := "select exists(select 1 from \"datahistoryjobresult\" where \"id\"=? limit 1)"

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, query)
		fmt.Fprintln(boil.DebugWriter, iD)
	}

	var exists bool
	if err := exec.QueryRowContext(ctx, query, iD).Scan(&exists); err != nil {
		return false, errors.Wrap(err, "sqlite3: unable to check if datahistoryjobresult exists")
	}

	return exists, nil
}

View File

@@ -0,0 +1,793 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
// testDatahistoryjobresults verifies the base query constructor returns a
// non-nil query.
func testDatahistoryjobresults(t *testing.T) {
	t.Parallel()

	if q := Datahistoryjobresults(); q.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
// testDatahistoryjobresultsDelete inserts one randomized record, deletes it via
// the object's Delete method, and verifies the table is empty afterwards.
func testDatahistoryjobresultsDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Roll back so the test leaves no rows behind.
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := o.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testDatahistoryjobresultsQueryDeleteAll inserts one record and deletes it via
// the query-level DeleteAll, verifying exactly one row was removed.
func testDatahistoryjobresultsQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := Datahistoryjobresults().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testDatahistoryjobresultsSliceDeleteAll inserts one record and deletes it via
// the slice-level DeleteAll, verifying exactly one row was removed.
func testDatahistoryjobresultsSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := DatahistoryjobresultSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
// testDatahistoryjobresultsExists inserts one record and verifies
// DatahistoryjobresultExists reports it as present.
func testDatahistoryjobresultsExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := DatahistoryjobresultExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if Datahistoryjobresult exists: %s", err)
	}
	if !e {
		t.Errorf("Expected DatahistoryjobresultExists to return true, but got false.")
	}
}
// testDatahistoryjobresultsFind inserts one record and verifies
// FindDatahistoryjobresult retrieves it by primary key.
func testDatahistoryjobresultsFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	datahistoryjobresultFound, err := FindDatahistoryjobresult(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if datahistoryjobresultFound == nil {
		t.Error("want a record, got nil")
	}
}
// testDatahistoryjobresultsBind inserts one record and verifies the query
// result can be bound back into a struct.
func testDatahistoryjobresultsBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = Datahistoryjobresults().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
// testDatahistoryjobresultsOne inserts one record and verifies One returns a
// non-nil result.
func testDatahistoryjobresultsOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := Datahistoryjobresults().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
// testDatahistoryjobresultsAll inserts two records and verifies All returns
// both of them.
func testDatahistoryjobresultsAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	datahistoryjobresultOne := &Datahistoryjobresult{}
	datahistoryjobresultTwo := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := Datahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
// testDatahistoryjobresultsCount inserts two records and verifies Count
// reports two rows.
func testDatahistoryjobresultsCount(t *testing.T) {
	t.Parallel()

	var err error
	seed := randomize.NewSeed()
	datahistoryjobresultOne := &Datahistoryjobresult{}
	datahistoryjobresultTwo := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// datahistoryjobresultBeforeInsertHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultAfterInsertHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultAfterSelectHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultBeforeUpdateHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultAfterUpdateHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultBeforeDeleteHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultAfterDeleteHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultBeforeUpsertHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// datahistoryjobresultAfterUpsertHook zeroes the object so the hooks test can
// detect invocation via reflect.DeepEqual against an empty struct.
func datahistoryjobresultAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
// testDatahistoryjobresultsHooks registers each hook point in turn, fires it
// against a randomized object, and verifies the hook ran (the object is zeroed
// by the hook and compared against an empty struct). Each hook slice is reset
// afterwards so hook points are tested in isolation.
func testDatahistoryjobresultsHooks(t *testing.T) {
	t.Parallel()

	var err error

	ctx := context.Background()
	empty := &Datahistoryjobresult{}
	o := &Datahistoryjobresult{}

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, false); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult object: %s", err)
	}

	AddDatahistoryjobresultHook(boil.BeforeInsertHook, datahistoryjobresultBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeInsertHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterInsertHook, datahistoryjobresultAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterInsertHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterSelectHook, datahistoryjobresultAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterSelectHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.BeforeUpdateHook, datahistoryjobresultBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeUpdateHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterUpdateHook, datahistoryjobresultAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterUpdateHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.BeforeDeleteHook, datahistoryjobresultBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeDeleteHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterDeleteHook, datahistoryjobresultAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterDeleteHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.BeforeUpsertHook, datahistoryjobresultBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeUpsertHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterUpsertHook, datahistoryjobresultAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterUpsertHooks = []DatahistoryjobresultHook{}
}
// testDatahistoryjobresultsInsert verifies that a randomized
// Datahistoryjobresult row can be inserted and then counted back.
func testDatahistoryjobresultsInsert(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	record := &Datahistoryjobresult{}
	if err := randomize.Struct(seed, record, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := record.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	total, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if total != 1 {
		t.Error("want one record, got:", total)
	}
}
// testDatahistoryjobresultsInsertWhitelist verifies insertion with an
// explicit column whitelist (non-default columns only) succeeds.
func testDatahistoryjobresultsInsertWhitelist(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	record := &Datahistoryjobresult{}
	if err := randomize.Struct(seed, record, datahistoryjobresultDBTypes, true); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := record.Insert(ctx, tx, boil.Whitelist(datahistoryjobresultColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}
	total, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if total != 1 {
		t.Error("want one record, got:", total)
	}
}
// testDatahistoryjobresultToOneDatahistoryjobUsingJob verifies the
// to-one relationship from Datahistoryjobresult to its parent
// Datahistoryjob: the Job() query accessor and the LoadJob eager loader
// in both slice and singular modes.
func testDatahistoryjobresultToOneDatahistoryjobUsingJob(t *testing.T) {
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var local Datahistoryjobresult
	var foreign Datahistoryjob
	seed := randomize.NewSeed()
	if err := randomize.Struct(seed, &local, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err := randomize.Struct(seed, &foreign, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// Point the child at the inserted parent before inserting it.
	local.JobID = foreign.ID
	if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	check, err := local.Job().One(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}
	if check.ID != foreign.ID {
		t.Errorf("want: %v, got %v", foreign.ID, check.ID)
	}
	// Eager load via the slice (non-singular) path.
	slice := DatahistoryjobresultSlice{&local}
	if err = local.L.LoadJob(ctx, tx, false, (*[]*Datahistoryjobresult)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if local.R.Job == nil {
		t.Error("struct should have been eager loaded")
	}
	// Reset the cached relation and eager load again via the singular path.
	local.R.Job = nil
	if err = local.L.LoadJob(ctx, tx, true, &local, nil); err != nil {
		t.Fatal(err)
	}
	if local.R.Job == nil {
		t.Error("struct should have been eager loaded")
	}
}
// testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob verifies that
// SetJob re-points a result at successive jobs, updating the foreign key
// column in the database as well as both in-memory relationship structs.
func testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjobresult
	var b, c Datahistoryjob
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	// Plain assignment (not :=) so the pre-declared err is reused rather
	// than shadowed, consistent with the sibling inserts below.
	if err = a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	for i, x := range []*Datahistoryjob{&b, &c} {
		// b already exists (i == 0 → insert=false); c is inserted by
		// SetJob itself (i == 1 → insert=true).
		err = a.SetJob(ctx, tx, i != 0, x)
		if err != nil {
			t.Fatal(err)
		}
		if a.R.Job != x {
			t.Error("relationship struct not set to correct value")
		}
		if x.R.JobDatahistoryjobresults[0] != &a {
			t.Error("failed to append to foreign relationship struct")
		}
		if a.JobID != x.ID {
			t.Error("foreign key was wrong value", a.JobID)
		}
		// Zero the FK in memory, then Reload to prove the database value
		// was actually persisted by SetJob.
		zero := reflect.Zero(reflect.TypeOf(a.JobID))
		reflect.Indirect(reflect.ValueOf(&a.JobID)).Set(zero)
		if err = a.Reload(ctx, tx); err != nil {
			t.Fatal("failed to reload", err)
		}
		if a.JobID != x.ID {
			t.Error("foreign key was wrong value", a.JobID, x.ID)
		}
	}
}
// testDatahistoryjobresultsReload checks that Reload refreshes a single
// inserted row without error.
func testDatahistoryjobresultsReload(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	record := &Datahistoryjobresult{}
	if err := randomize.Struct(seed, record, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := record.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err := record.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testDatahistoryjobresultsReloadAll checks that ReloadAll refreshes a
// slice containing one inserted row without error.
func testDatahistoryjobresultsReloadAll(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	record := &Datahistoryjobresult{}
	if err := randomize.Struct(seed, record, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := record.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	all := DatahistoryjobresultSlice{record}
	if err := all.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
// testDatahistoryjobresultsSelect inserts one row and verifies that a
// bare All() select returns exactly that row.
func testDatahistoryjobresultsSelect(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	record := &Datahistoryjobresult{}
	if err := randomize.Struct(seed, record, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err := record.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	rows, err := Datahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(rows) != 1 {
		t.Error("want one record, got:", len(rows))
	}
}
var (
	// datahistoryjobresultDBTypes maps struct field names to their database
	// column types; used by randomize to generate valid test rows.
	datahistoryjobresultDBTypes = map[string]string{`ID`: `TEXT`, `JobID`: `TEXT`, `Result`: `TEXT`, `Status`: `REAL`, `IntervalStartTime`: `TIMESTAMP`, `IntervalEndTime`: `TIMESTAMP`, `RunTime`: `TIMESTAMP`}
	// References the bytes package so its import is retained even if the
	// generated code does not otherwise use it.
	_ = bytes.MinRead
)
// testDatahistoryjobresultsUpdate inserts a randomized row, re-randomizes
// its non-key columns, and verifies Update affects exactly one row.
func testDatahistoryjobresultsUpdate(t *testing.T) {
	t.Parallel()
	// Idiomatic len(...) == 0 (was a Yoda-style `0 == len(...)`),
	// consistent with the other length checks in this file.
	if len(datahistoryjobresultPrimaryKeyColumns) == 0 {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Re-randomize everything except the primary key so the update writes
	// new values while still targeting the same row.
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testDatahistoryjobresultsSliceUpdateAll inserts one row, re-randomizes
// its non-key columns, then verifies slice.UpdateAll applies a column map
// built via reflection and updates exactly one row.
func testDatahistoryjobresultsSliceUpdateAll(t *testing.T) {
	t.Parallel()
	if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Re-randomize all columns except the primary key.
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(datahistoryjobresultAllColumns, datahistoryjobresultPrimaryKeyColumns) {
		fields = datahistoryjobresultAllColumns
	} else {
		fields = strmangle.SetComplement(
			datahistoryjobresultAllColumns,
			datahistoryjobresultPrimaryKeyColumns,
		)
	}
	// Build the column -> value update map by matching each updatable
	// column name against the struct's `boil` field tags via reflection.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()
	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}
	slice := DatahistoryjobresultSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}

View File

@@ -51,10 +51,12 @@ var ExchangeWhere = struct {
// ExchangeRels holds the relationship names an Exchange row participates
// in; the string values are the keys used when eager loading.
var ExchangeRels = struct {
	ExchangeNameCandle              string
	ExchangeNameTrade               string
	ExchangeNameDatahistoryjobs     string
	ExchangeNameWithdrawalHistories string
}{
	ExchangeNameCandle:              "ExchangeNameCandle",
	ExchangeNameTrade:               "ExchangeNameTrade",
	ExchangeNameDatahistoryjobs:     "ExchangeNameDatahistoryjobs",
	ExchangeNameWithdrawalHistories: "ExchangeNameWithdrawalHistories",
}
@@ -62,6 +64,7 @@ var ExchangeRels = struct {
// exchangeR is where relationships are stored after eager loading.
type exchangeR struct {
	ExchangeNameCandle              *Candle
	ExchangeNameTrade               *Trade
	ExchangeNameDatahistoryjobs     DatahistoryjobSlice
	ExchangeNameWithdrawalHistories WithdrawalHistorySlice
}
@@ -383,6 +386,27 @@ func (o *Exchange) ExchangeNameTrade(mods ...qm.QueryMod) tradeQuery {
return query
}
// ExchangeNameDatahistoryjobs retrieves all the datahistoryjob's Datahistoryjobs with an executor via exchange_name_id column.
func (o *Exchange) ExchangeNameDatahistoryjobs(mods ...qm.QueryMod) datahistoryjobQuery {
	var queryMods []qm.QueryMod
	if len(mods) != 0 {
		queryMods = append(queryMods, mods...)
	}
	// Constrain to rows whose foreign key points at this exchange.
	queryMods = append(queryMods,
		qm.Where("\"datahistoryjob\".\"exchange_name_id\"=?", o.ID),
	)
	query := Datahistoryjobs(queryMods...)
	queries.SetFrom(query.Query, "\"datahistoryjob\"")
	// Default the projection to all columns if the caller set none.
	if len(queries.GetSelect(query.Query)) == 0 {
		queries.SetSelect(query.Query, []string{"\"datahistoryjob\".*"})
	}
	return query
}
// ExchangeNameWithdrawalHistories retrieves all the withdrawal_history's WithdrawalHistories with an executor via exchange_name_id column.
func (o *Exchange) ExchangeNameWithdrawalHistories(mods ...qm.QueryMod) withdrawalHistoryQuery {
var queryMods []qm.QueryMod
@@ -600,6 +624,101 @@ func (exchangeL) LoadExchangeNameTrade(ctx context.Context, e boil.ContextExecut
return nil
}
// LoadExchangeNameDatahistoryjobs allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
func (exchangeL) LoadExchangeNameDatahistoryjobs(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
	var slice []*Exchange
	var object *Exchange
	// maybeExchange is either a single *Exchange (singular mode) or a
	// *[]*Exchange (slice mode) depending on how the loader was invoked.
	if singular {
		object = maybeExchange.(*Exchange)
	} else {
		slice = *maybeExchange.(*[]*Exchange)
	}
	// Collect the distinct exchange IDs to query, initialising each
	// object's R struct along the way.
	args := make([]interface{}, 0, 1)
	if singular {
		if object.R == nil {
			object.R = &exchangeR{}
		}
		args = append(args, object.ID)
	} else {
	Outer:
		for _, obj := range slice {
			if obj.R == nil {
				obj.R = &exchangeR{}
			}
			// Skip IDs that are already queued (deduplication).
			for _, a := range args {
				if a == obj.ID {
					continue Outer
				}
			}
			args = append(args, obj.ID)
		}
	}
	if len(args) == 0 {
		return nil
	}
	query := NewQuery(qm.From(`datahistoryjob`), qm.WhereIn(`datahistoryjob.exchange_name_id in ?`, args...))
	if mods != nil {
		mods.Apply(query)
	}
	results, err := query.QueryContext(ctx, e)
	if err != nil {
		return errors.Wrap(err, "failed to eager load datahistoryjob")
	}
	var resultSlice []*Datahistoryjob
	if err = queries.Bind(results, &resultSlice); err != nil {
		return errors.Wrap(err, "failed to bind eager loaded slice datahistoryjob")
	}
	if err = results.Close(); err != nil {
		return errors.Wrap(err, "failed to close results in eager load on datahistoryjob")
	}
	if err = results.Err(); err != nil {
		return errors.Wrap(err, "error occurred during iteration of eager loaded relations for datahistoryjob")
	}
	// Run any registered after-select hooks on the freshly loaded rows.
	if len(datahistoryjobAfterSelectHooks) != 0 {
		for _, obj := range resultSlice {
			if err := obj.doAfterSelectHooks(ctx, e); err != nil {
				return err
			}
		}
	}
	// Singular: attach every loaded job to the one object and back-link.
	if singular {
		object.R.ExchangeNameDatahistoryjobs = resultSlice
		for _, foreign := range resultSlice {
			if foreign.R == nil {
				foreign.R = &datahistoryjobR{}
			}
			foreign.R.ExchangeName = object
		}
		return nil
	}
	// Slice: match each loaded job back to its parent exchange by ID.
	for _, foreign := range resultSlice {
		for _, local := range slice {
			if local.ID == foreign.ExchangeNameID {
				local.R.ExchangeNameDatahistoryjobs = append(local.R.ExchangeNameDatahistoryjobs, foreign)
				if foreign.R == nil {
					foreign.R = &datahistoryjobR{}
				}
				foreign.R.ExchangeName = local
				break
			}
		}
	}
	return nil
}
// LoadExchangeNameWithdrawalHistories allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
func (exchangeL) LoadExchangeNameWithdrawalHistories(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
@@ -797,6 +916,59 @@ func (o *Exchange) SetExchangeNameTrade(ctx context.Context, exec boil.ContextEx
return nil
}
// AddExchangeNameDatahistoryjobs adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameDatahistoryjobs.
// Sets related.R.ExchangeName appropriately.
func (o *Exchange) AddExchangeNameDatahistoryjobs(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Datahistoryjob) error {
	var err error
	for _, rel := range related {
		if insert {
			// New record: set the FK first, then insert the row.
			rel.ExchangeNameID = o.ID
			if err = rel.Insert(ctx, exec, boil.Infer()); err != nil {
				return errors.Wrap(err, "failed to insert into foreign table")
			}
		} else {
			// Existing record: issue an UPDATE re-pointing its FK.
			updateQuery := fmt.Sprintf(
				"UPDATE \"datahistoryjob\" SET %s WHERE %s",
				strmangle.SetParamNames("\"", "\"", 0, []string{"exchange_name_id"}),
				strmangle.WhereClause("\"", "\"", 0, datahistoryjobPrimaryKeyColumns),
			)
			values := []interface{}{o.ID, rel.ID}
			if boil.DebugMode {
				fmt.Fprintln(boil.DebugWriter, updateQuery)
				fmt.Fprintln(boil.DebugWriter, values)
			}
			if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
				return errors.Wrap(err, "failed to update foreign table")
			}
			// Mirror the database change on the in-memory struct.
			rel.ExchangeNameID = o.ID
		}
	}
	// Cache the relationship on the exchange side.
	if o.R == nil {
		o.R = &exchangeR{
			ExchangeNameDatahistoryjobs: related,
		}
	} else {
		o.R.ExchangeNameDatahistoryjobs = append(o.R.ExchangeNameDatahistoryjobs, related...)
	}
	// And on each related job's side.
	for _, rel := range related {
		if rel.R == nil {
			rel.R = &datahistoryjobR{
				ExchangeName: o,
			}
		} else {
			rel.R.ExchangeName = o
		}
	}
	return nil
}
// AddExchangeNameWithdrawalHistories adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameWithdrawalHistories.

View File

@@ -719,6 +719,84 @@ func testExchangeOneToOneSetOpTradeUsingExchangeNameTrade(t *testing.T) {
}
}
// testExchangeToManyExchangeNameDatahistoryjobs verifies the to-many
// relationship from Exchange to Datahistoryjob: the query accessor and
// the eager loader in both slice and singular modes.
func testExchangeToManyExchangeNameDatahistoryjobs(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var a Exchange
	var b, c Datahistoryjob
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, exchangeDBTypes, true, exchangeColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Exchange struct: %s", err)
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}
	// Point both jobs at the inserted exchange before inserting them.
	b.ExchangeNameID = a.ID
	c.ExchangeNameID = a.ID
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	check, err := a.ExchangeNameDatahistoryjobs().All(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}
	// Both inserted jobs must be present in the accessor's result set.
	bFound, cFound := false, false
	for _, v := range check {
		if v.ExchangeNameID == b.ExchangeNameID {
			bFound = true
		}
		if v.ExchangeNameID == c.ExchangeNameID {
			cFound = true
		}
	}
	if !bFound {
		t.Error("expected to find b")
	}
	if !cFound {
		t.Error("expected to find c")
	}
	// Eager load via the slice (non-singular) path.
	slice := ExchangeSlice{&a}
	if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, false, (*[]*Exchange)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}
	// Reset the cached relation and eager load via the singular path.
	a.R.ExchangeNameDatahistoryjobs = nil
	if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, true, &a, nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}
	if t.Failed() {
		t.Logf("%#v", check)
	}
}
func testExchangeToManyExchangeNameWithdrawalHistories(t *testing.T) {
var err error
ctx := context.Background()
@@ -797,6 +875,81 @@ func testExchangeToManyExchangeNameWithdrawalHistories(t *testing.T) {
}
}
// testExchangeToManyAddOpExchangeNameDatahistoryjobs verifies
// AddExchangeNameDatahistoryjobs for both pre-inserted rows (insert=false)
// and rows inserted by the add operation itself (insert=true).
func testExchangeToManyAddOpExchangeNameDatahistoryjobs(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var a Exchange
	var b, c, d, e Datahistoryjob
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	foreigners := []*Datahistoryjob{&b, &c, &d, &e}
	for _, x := range foreigners {
		if err = randomize.Struct(seed, x, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
			t.Fatal(err)
		}
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// b and c are pre-inserted; d and e will be inserted by the add op.
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	foreignersSplitByInsertion := [][]*Datahistoryjob{
		{&b, &c},
		{&d, &e},
	}
	for i, x := range foreignersSplitByInsertion {
		// First batch uses insert=false (rows exist), second insert=true.
		err = a.AddExchangeNameDatahistoryjobs(ctx, tx, i != 0, x...)
		if err != nil {
			t.Fatal(err)
		}
		first := x[0]
		second := x[1]
		if a.ID != first.ExchangeNameID {
			t.Error("foreign key was wrong value", a.ID, first.ExchangeNameID)
		}
		if a.ID != second.ExchangeNameID {
			t.Error("foreign key was wrong value", a.ID, second.ExchangeNameID)
		}
		if first.R.ExchangeName != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}
		if second.R.ExchangeName != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}
		// Each batch appends two entries to the cached relation slice.
		if a.R.ExchangeNameDatahistoryjobs[i*2] != first {
			t.Error("relationship struct slice not set to correct value")
		}
		if a.R.ExchangeNameDatahistoryjobs[i*2+1] != second {
			t.Error("relationship struct slice not set to correct value")
		}
		count, err := a.ExchangeNameDatahistoryjobs().Count(ctx, tx)
		if err != nil {
			t.Fatal(err)
		}
		if want := int64((i + 1) * 2); count != want {
			t.Error("want", want, "got", count)
		}
	}
}
func testExchangeToManyAddOpExchangeNameWithdrawalHistories(t *testing.T) {
var err error

View File

@@ -63,28 +63,6 @@ var TradeColumns = struct {
}
// Generated where
type whereHelpernull_String struct{ field string }
func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, true, x)
}
func (w whereHelpernull_String) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
func (w whereHelpernull_String) LT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_String) GT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GTE, x)
}
var TradeWhere = struct {
ID whereHelperstring

View File

@@ -80,7 +80,7 @@ func Series(exchangeName, base, quote string, interval int64, asset string, star
}
}
if len(out.Candles) < 1 {
return out, fmt.Errorf(errNoCandleDataFound, exchangeName, base, quote, interval, asset)
return out, fmt.Errorf("%w: %s %s %s %v %s", ErrNoCandleDataFound, exchangeName, base, quote, interval, asset)
}
out.ExchangeID = exchangeName

View File

@@ -242,10 +242,8 @@ func TestSeries(t *testing.T) {
}
ret, err = Series("", "", "", 0, "", start, end)
if err != nil {
if !errors.Is(err, errInvalidInput) {
t.Fatal(err)
}
if !errors.Is(err, errInvalidInput) {
t.Fatal(err)
}
ret, err = Series(testExchanges[0].Name,
@@ -254,9 +252,7 @@ func TestSeries(t *testing.T) {
start, end)
if err != nil {
if !errors.Is(err, errInvalidInput) {
if err.Error() != fmt.Errorf(errNoCandleDataFound, testExchanges[0].Name,
"BTC", "MOON",
"864000", "spot").Error() {
if !errors.Is(err, ErrNoCandleDataFound) {
t.Fatal(err)
}
}

View File

@@ -5,13 +5,11 @@ import (
"time"
)
const (
errNoCandleDataFound = "no candle data found: %v %v %v %v %v"
)
var (
errInvalidInput = errors.New("exchange, base, quote, asset, interval, start & end cannot be empty")
errNoCandleData = errors.New("no candle data provided")
// ErrNoCandleDataFound returns when no candle data is found
ErrNoCandleDataFound = errors.New("no candle data found")
)
// Item generic candle holder for modelPSQL & modelSQLite

View File

@@ -0,0 +1,694 @@
package datahistoryjob
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/models/postgres"
"github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjobresult"
"github.com/thrasher-corp/gocryptotrader/log"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries/qm"
)
// Setup returns a DBService wrapping the supplied database instance,
// rejecting nil or disconnected databases.
func Setup(db database.IDatabase) (*DBService, error) {
	if db == nil {
		return nil, database.ErrNilInstance
	}
	if !db.IsConnected() {
		return nil, database.ErrDatabaseNotConnected
	}
	dbCon, err := db.GetSQL()
	if err != nil {
		return nil, err
	}
	return &DBService{
		sql:    dbCon,
		driver: db.GetConfig().Driver,
	}, nil
}
// Upsert inserts or updates jobs into the database within a single
// transaction; on any failure the transaction is rolled back.
func (db *DBService) Upsert(jobs ...*DataHistoryJob) error {
	ctx := context.Background()
	tx, err := db.sql.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginTx %w", err)
	}
	// Rollback fires on any error path; err is captured by the closure,
	// so every early exit below must assign to err for it to trigger.
	defer func() {
		if err != nil {
			errRB := tx.Rollback()
			if errRB != nil {
				log.Errorf(log.DatabaseMgr, "Insert tx.Rollback %v", errRB)
			}
		}
	}()
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite:
		err = upsertSqlite(ctx, tx, jobs...)
	case database.DBPostgreSQL:
		err = upsertPostgres(ctx, tx, jobs...)
	default:
		// Bug fix: previously this returned directly without assigning
		// err, so the deferred rollback never ran and the open
		// transaction leaked. Assigning to err lets the defer clean up.
		err = database.ErrNoDatabaseProvided
	}
	if err != nil {
		return err
	}
	return tx.Commit()
}
// GetByNickName returns a job by its nickname, dispatching to the
// driver-specific implementation.
func (db *DBService) GetByNickName(nickname string) (*DataHistoryJob, error) {
	if db.driver == database.DBSQLite3 || db.driver == database.DBSQLite {
		return db.getByNicknameSQLite(nickname)
	}
	if db.driver == database.DBPostgreSQL {
		return db.getByNicknamePostgres(nickname)
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetByID returns a job by its id, dispatching to the driver-specific
// implementation.
func (db *DBService) GetByID(id string) (*DataHistoryJob, error) {
	if db.driver == database.DBSQLite3 || db.driver == database.DBSQLite {
		return db.getByIDSQLite(id)
	}
	if db.driver == database.DBPostgreSQL {
		return db.getByIDPostgres(id)
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetJobsBetween will return all jobs between two dates, dispatching to
// the driver-specific implementation.
func (db *DBService) GetJobsBetween(startDate, endDate time.Time) ([]DataHistoryJob, error) {
	if db.driver == database.DBSQLite3 || db.driver == database.DBSQLite {
		return db.getJobsBetweenSQLite(startDate, endDate)
	}
	if db.driver == database.DBPostgreSQL {
		return db.getJobsBetweenPostgres(startDate, endDate)
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetAllIncompleteJobsAndResults returns all jobs that have the status
// "active", dispatching to the driver-specific implementation.
func (db *DBService) GetAllIncompleteJobsAndResults() ([]DataHistoryJob, error) {
	if db.driver == database.DBSQLite3 || db.driver == database.DBSQLite {
		return db.getAllIncompleteJobsAndResultsSQLite()
	}
	if db.driver == database.DBPostgreSQL {
		return db.getAllIncompleteJobsAndResultsPostgres()
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetJobAndAllResults returns a job and joins all job results,
// dispatching to the driver-specific implementation.
func (db *DBService) GetJobAndAllResults(nickname string) (*DataHistoryJob, error) {
	if db.driver == database.DBSQLite3 || db.driver == database.DBSQLite {
		return db.getJobAndAllResultsSQLite(nickname)
	}
	if db.driver == database.DBPostgreSQL {
		return db.getJobAndAllResultsPostgres(nickname)
	}
	return nil, database.ErrNoDatabaseProvided
}
// upsertSqlite writes the supplied jobs as rows within tx, resolving the
// exchange name to its ID before each insert. Times are stored as
// RFC3339 strings (SQLite has no native timestamp type here).
func upsertSqlite(ctx context.Context, tx *sql.Tx, jobs ...*DataHistoryJob) error {
	for _, job := range jobs {
		exch, err := sqlite3.Exchanges(
			qm.Where("name = ?", strings.ToLower(job.ExchangeName))).One(ctx, tx)
		if err != nil {
			return err
		}
		row := sqlite3.Datahistoryjob{
			ID:             job.ID,
			ExchangeNameID: exch.ID,
			Nickname:       strings.ToLower(job.Nickname),
			Asset:          strings.ToLower(job.Asset),
			Base:           strings.ToUpper(job.Base),
			Quote:          strings.ToUpper(job.Quote),
			StartTime:      job.StartDate.UTC().Format(time.RFC3339),
			EndTime:        job.EndDate.UTC().Format(time.RFC3339),
			Interval:       float64(job.Interval),
			DataType:       float64(job.DataType),
			RequestSize:    float64(job.RequestSizeLimit),
			MaxRetries:     float64(job.MaxRetryAttempts),
			BatchCount:     float64(job.BatchSize),
			Status:         float64(job.Status),
			Created:        time.Now().UTC().Format(time.RFC3339),
		}
		if err = row.Insert(ctx, tx, boil.Infer()); err != nil {
			return err
		}
	}
	return nil
}
// upsertPostgres upserts the supplied jobs within tx, resolving the
// exchange name to its ID first and conflicting on the nickname column.
func upsertPostgres(ctx context.Context, tx *sql.Tx, jobs ...*DataHistoryJob) error {
	for _, job := range jobs {
		exch, err := postgres.Exchanges(
			qm.Where("name = ?", strings.ToLower(job.ExchangeName))).One(ctx, tx)
		if err != nil {
			return err
		}
		row := postgres.Datahistoryjob{
			ID:             job.ID,
			Nickname:       strings.ToLower(job.Nickname),
			ExchangeNameID: exch.ID,
			Asset:          strings.ToLower(job.Asset),
			Base:           strings.ToUpper(job.Base),
			Quote:          strings.ToUpper(job.Quote),
			StartTime:      job.StartDate.UTC(),
			EndTime:        job.EndDate.UTC(),
			Interval:       float64(job.Interval),
			DataType:       float64(job.DataType),
			BatchCount:     float64(job.BatchSize),
			RequestSize:    float64(job.RequestSizeLimit),
			MaxRetries:     float64(job.MaxRetryAttempts),
			Status:         float64(job.Status),
			Created:        time.Now().UTC(),
		}
		if err = row.Upsert(ctx, tx, true, []string{"nickname"}, boil.Infer(), boil.Infer()); err != nil {
			return err
		}
	}
	return nil
}
// getByNicknameSQLite fetches a single job by nickname from the SQLite
// store, converting the stored RFC3339 strings back to time.Time.
func (db *DBService) getByNicknameSQLite(nickname string) (*DataHistoryJob, error) {
	ctx := context.Background()
	row, err := sqlite3.Datahistoryjobs(qm.Where("nickname = ?", strings.ToLower(nickname))).One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	exch, err := row.ExchangeName().One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	start, err := time.Parse(time.RFC3339, row.StartTime)
	if err != nil {
		return nil, err
	}
	end, err := time.Parse(time.RFC3339, row.EndTime)
	if err != nil {
		return nil, err
	}
	created, err := time.Parse(time.RFC3339, row.Created)
	if err != nil {
		return nil, err
	}
	return &DataHistoryJob{
		ID:               row.ID,
		Nickname:         row.Nickname,
		ExchangeID:       row.ExchangeNameID,
		ExchangeName:     exch.Name,
		Asset:            row.Asset,
		Base:             row.Base,
		Quote:            row.Quote,
		StartDate:        start,
		EndDate:          end,
		Interval:         int64(row.Interval),
		BatchSize:        int64(row.BatchCount),
		RequestSizeLimit: int64(row.RequestSize),
		DataType:         int64(row.DataType),
		MaxRetryAttempts: int64(row.MaxRetries),
		Status:           int64(row.Status),
		CreatedDate:      created,
	}, nil
}
// getByNicknamePostgres fetches a single job by nickname from the
// Postgres store; timestamps are native so no parsing is needed.
func (db *DBService) getByNicknamePostgres(nickname string) (*DataHistoryJob, error) {
	ctx := context.Background()
	row, err := postgres.Datahistoryjobs(qm.Where("nickname = ?", strings.ToLower(nickname))).One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	exch, err := row.ExchangeName().One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	return &DataHistoryJob{
		ID:               row.ID,
		Nickname:         row.Nickname,
		ExchangeID:       row.ExchangeNameID,
		ExchangeName:     exch.Name,
		Asset:            row.Asset,
		Base:             row.Base,
		Quote:            row.Quote,
		StartDate:        row.StartTime,
		EndDate:          row.EndTime,
		Interval:         int64(row.Interval),
		BatchSize:        int64(row.BatchCount),
		RequestSizeLimit: int64(row.RequestSize),
		DataType:         int64(row.DataType),
		MaxRetryAttempts: int64(row.MaxRetries),
		Status:           int64(row.Status),
		CreatedDate:      row.Created,
	}, nil
}
// getByIDSQLite fetches a single job by id from the SQLite store,
// converting the stored RFC3339 strings back to time.Time.
func (db *DBService) getByIDSQLite(id string) (*DataHistoryJob, error) {
	ctx := context.Background()
	row, err := sqlite3.Datahistoryjobs(qm.Where("id = ?", id)).One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	exch, err := row.ExchangeName().One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	start, err := time.Parse(time.RFC3339, row.StartTime)
	if err != nil {
		return nil, err
	}
	end, err := time.Parse(time.RFC3339, row.EndTime)
	if err != nil {
		return nil, err
	}
	created, err := time.Parse(time.RFC3339, row.Created)
	if err != nil {
		return nil, err
	}
	return &DataHistoryJob{
		ID:               row.ID,
		Nickname:         row.Nickname,
		ExchangeID:       row.ExchangeNameID,
		ExchangeName:     exch.Name,
		Asset:            row.Asset,
		Base:             row.Base,
		Quote:            row.Quote,
		StartDate:        start,
		EndDate:          end,
		Interval:         int64(row.Interval),
		RequestSizeLimit: int64(row.RequestSize),
		DataType:         int64(row.DataType),
		MaxRetryAttempts: int64(row.MaxRetries),
		BatchSize:        int64(row.BatchCount),
		Status:           int64(row.Status),
		CreatedDate:      created,
	}, nil
}
// getByIDPostgres fetches a single job by id from the Postgres store;
// timestamps are native so no parsing is needed.
func (db *DBService) getByIDPostgres(id string) (*DataHistoryJob, error) {
	ctx := context.Background()
	row, err := postgres.Datahistoryjobs(qm.Where("id = ?", id)).One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	exch, err := row.ExchangeName().One(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	return &DataHistoryJob{
		ID:               row.ID,
		Nickname:         row.Nickname,
		ExchangeID:       row.ExchangeNameID,
		ExchangeName:     exch.Name,
		Asset:            row.Asset,
		Base:             row.Base,
		Quote:            row.Quote,
		StartDate:        row.StartTime,
		EndDate:          row.EndTime,
		Interval:         int64(row.Interval),
		BatchSize:        int64(row.BatchCount),
		RequestSizeLimit: int64(row.RequestSize),
		DataType:         int64(row.DataType),
		MaxRetryAttempts: int64(row.MaxRetries),
		Status:           int64(row.Status),
		CreatedDate:      row.Created,
	}, nil
}
// getJobsBetweenSQLite returns all jobs created between the two dates
// from the SQLite store. Returns a nil slice when nothing matches.
func (db *DBService) getJobsBetweenSQLite(startDate, endDate time.Time) ([]DataHistoryJob, error) {
	ctx := context.Background()
	results, err := sqlite3.Datahistoryjobs(qm.Where("created BETWEEN ? AND ? ", startDate.UTC().Format(time.RFC3339), endDate.UTC().Format(time.RFC3339))).All(ctx, db.sql)
	if err != nil {
		return nil, err
	}
	var jobs []DataHistoryJob
	for _, row := range results {
		exch, err := row.ExchangeName(qm.Where("id = ?", row.ExchangeNameID)).One(ctx, db.sql)
		if err != nil {
			return nil, err
		}
		start, err := time.Parse(time.RFC3339, row.StartTime)
		if err != nil {
			return nil, err
		}
		end, err := time.Parse(time.RFC3339, row.EndTime)
		if err != nil {
			return nil, err
		}
		created, err := time.Parse(time.RFC3339, row.Created)
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               row.ID,
			Nickname:         row.Nickname,
			ExchangeID:       row.ExchangeNameID,
			ExchangeName:     exch.Name,
			Asset:            row.Asset,
			Base:             row.Base,
			Quote:            row.Quote,
			StartDate:        start,
			EndDate:          end,
			Interval:         int64(row.Interval),
			RequestSizeLimit: int64(row.RequestSize),
			BatchSize:        int64(row.BatchCount),
			DataType:         int64(row.DataType),
			MaxRetryAttempts: int64(row.MaxRetries),
			Status:           int64(row.Status),
			CreatedDate:      created,
		})
	}
	return jobs, nil
}
// getJobsBetweenPostgres returns all jobs created between startDate and
// endDate from a PostgreSQL database, passing time.Time bounds directly.
func (db *DBService) getJobsBetweenPostgres(startDate, endDate time.Time) ([]DataHistoryJob, error) {
	var jobs []DataHistoryJob
	query := postgres.Datahistoryjobs(qm.Where("created BETWEEN ? AND ? ", startDate, endDate))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return jobs, err
	}
	for i := range results {
		// NOTE(review): the extra where clause appears redundant with the
		// FK join performed by ExchangeName(); presumed harmless — confirm.
		exchangeResult, err := results[i].ExchangeName(qm.Where("id = ?", results[i].ExchangeNameID)).One(context.Background(), db.sql)
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               results[i].ID,
			Nickname:         results[i].Nickname,
			ExchangeID:       results[i].ExchangeNameID,
			ExchangeName:     exchangeResult.Name,
			Asset:            results[i].Asset,
			Base:             results[i].Base,
			Quote:            results[i].Quote,
			StartDate:        results[i].StartTime,
			EndDate:          results[i].EndTime,
			Interval:         int64(results[i].Interval),
			BatchSize:        int64(results[i].BatchCount),
			RequestSizeLimit: int64(results[i].RequestSize),
			DataType:         int64(results[i].DataType),
			MaxRetryAttempts: int64(results[i].MaxRetries),
			Status:           int64(results[i].Status),
			CreatedDate:      results[i].Created,
		})
	}
	return jobs, nil
}
// getJobAndAllResultsSQLite returns the job matching nickname along with all
// of its associated job results from a SQLite database. Nickname matching is
// case-insensitive via lowercasing. All stored RFC3339 strings are parsed
// back into time.Time values.
func (db *DBService) getJobAndAllResultsSQLite(nickname string) (*DataHistoryJob, error) {
	var job *DataHistoryJob
	// Eager-load the job's results and its exchange in one query.
	query := sqlite3.Datahistoryjobs(
		qm.Load(sqlite3.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Load(sqlite3.DatahistoryjobRels.ExchangeName),
		qm.Where("nickname = ?", strings.ToLower(nickname)))
	result, err := query.One(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	// Convert each eager-loaded result row into the DTO form.
	var jobResults []*datahistoryjobresult.DataHistoryJobResult
	for i := range result.R.JobDatahistoryjobresults {
		var start, end, run time.Time
		start, err = time.Parse(time.RFC3339, result.R.JobDatahistoryjobresults[i].IntervalStartTime)
		if err != nil {
			return nil, err
		}
		end, err = time.Parse(time.RFC3339, result.R.JobDatahistoryjobresults[i].IntervalEndTime)
		if err != nil {
			return nil, err
		}
		run, err = time.Parse(time.RFC3339, result.R.JobDatahistoryjobresults[i].RunTime)
		if err != nil {
			return nil, err
		}
		jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
			ID:                result.R.JobDatahistoryjobresults[i].ID,
			JobID:             result.R.JobDatahistoryjobresults[i].JobID,
			IntervalStartDate: start,
			IntervalEndDate:   end,
			Status:            int64(result.R.JobDatahistoryjobresults[i].Status),
			Result:            result.R.JobDatahistoryjobresults[i].Result.String,
			Date:              run,
		})
	}
	start, err := time.Parse(time.RFC3339, result.StartTime)
	if err != nil {
		return nil, err
	}
	end, err := time.Parse(time.RFC3339, result.EndTime)
	if err != nil {
		return nil, err
	}
	created, err := time.Parse(time.RFC3339, result.Created)
	if err != nil {
		return nil, err
	}
	job = &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     result.R.ExchangeName.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        start,
		EndDate:          end,
		Interval:         int64(result.Interval),
		BatchSize:        int64(result.BatchCount),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		Status:           int64(result.Status),
		CreatedDate:      created,
		Results:          jobResults,
	}
	return job, nil
}
// getJobAndAllResultsPostgres returns the job matching nickname along with
// all of its associated job results from a PostgreSQL database. Nickname
// matching is case-insensitive via lowercasing.
func (db *DBService) getJobAndAllResultsPostgres(nickname string) (*DataHistoryJob, error) {
	var job *DataHistoryJob
	// Eager-load the job's exchange and its results in one query.
	query := postgres.Datahistoryjobs(
		qm.Load(postgres.DatahistoryjobRels.ExchangeName),
		qm.Load(postgres.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Where("nickname = ?", strings.ToLower(nickname)))
	result, err := query.One(context.Background(), db.sql)
	if err != nil {
		return job, err
	}
	// Convert each eager-loaded result row into the DTO form.
	var jobResults []*datahistoryjobresult.DataHistoryJobResult
	for i := range result.R.JobDatahistoryjobresults {
		jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
			ID:                result.R.JobDatahistoryjobresults[i].ID,
			JobID:             result.R.JobDatahistoryjobresults[i].JobID,
			IntervalStartDate: result.R.JobDatahistoryjobresults[i].IntervalStartTime,
			IntervalEndDate:   result.R.JobDatahistoryjobresults[i].IntervalEndTime,
			Status:            int64(result.R.JobDatahistoryjobresults[i].Status),
			Result:            result.R.JobDatahistoryjobresults[i].Result.String,
			Date:              result.R.JobDatahistoryjobresults[i].RunTime,
		})
	}
	job = &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     result.R.ExchangeName.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        result.StartTime,
		EndDate:          result.EndTime,
		Interval:         int64(result.Interval),
		BatchSize:        int64(result.BatchCount),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		Status:           int64(result.Status),
		CreatedDate:      result.Created,
		Results:          jobResults,
	}
	return job, nil
}
// getAllIncompleteJobsAndResultsSQLite returns every job with status 0
// (incomplete) along with its results from a SQLite database, parsing the
// stored RFC3339 strings back into time.Time values.
func (db *DBService) getAllIncompleteJobsAndResultsSQLite() ([]DataHistoryJob, error) {
	var jobs []DataHistoryJob
	// status = 0 selects jobs that have not yet completed.
	query := sqlite3.Datahistoryjobs(
		qm.Load(sqlite3.DatahistoryjobRels.ExchangeName),
		qm.Load(sqlite3.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Where("status = ?", 0))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return jobs, err
	}
	for i := range results {
		// Convert each eager-loaded result row into the DTO form.
		var jobResults []*datahistoryjobresult.DataHistoryJobResult
		for j := range results[i].R.JobDatahistoryjobresults {
			var start, end, run time.Time
			start, err = time.Parse(time.RFC3339, results[i].R.JobDatahistoryjobresults[j].IntervalStartTime)
			if err != nil {
				return nil, err
			}
			end, err = time.Parse(time.RFC3339, results[i].R.JobDatahistoryjobresults[j].IntervalEndTime)
			if err != nil {
				return nil, err
			}
			run, err = time.Parse(time.RFC3339, results[i].R.JobDatahistoryjobresults[j].RunTime)
			if err != nil {
				return nil, err
			}
			jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
				ID:                results[i].R.JobDatahistoryjobresults[j].ID,
				JobID:             results[i].R.JobDatahistoryjobresults[j].JobID,
				IntervalStartDate: start,
				IntervalEndDate:   end,
				Status:            int64(results[i].R.JobDatahistoryjobresults[j].Status),
				Result:            results[i].R.JobDatahistoryjobresults[j].Result.String,
				Date:              run,
			})
		}
		start, err := time.Parse(time.RFC3339, results[i].StartTime)
		if err != nil {
			return nil, err
		}
		end, err := time.Parse(time.RFC3339, results[i].EndTime)
		if err != nil {
			return nil, err
		}
		created, err := time.Parse(time.RFC3339, results[i].Created)
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               results[i].ID,
			Nickname:         results[i].Nickname,
			ExchangeID:       results[i].ExchangeNameID,
			ExchangeName:     results[i].R.ExchangeName.Name,
			Asset:            results[i].Asset,
			Base:             results[i].Base,
			Quote:            results[i].Quote,
			StartDate:        start,
			EndDate:          end,
			Interval:         int64(results[i].Interval),
			BatchSize:        int64(results[i].BatchCount),
			RequestSizeLimit: int64(results[i].RequestSize),
			DataType:         int64(results[i].DataType),
			MaxRetryAttempts: int64(results[i].MaxRetries),
			Status:           int64(results[i].Status),
			CreatedDate:      created,
			Results:          jobResults,
		})
	}
	return jobs, nil
}
// getAllIncompleteJobsAndResultsPostgres returns every job with status 0
// (incomplete) along with its results from a PostgreSQL database.
func (db *DBService) getAllIncompleteJobsAndResultsPostgres() ([]DataHistoryJob, error) {
	var jobs []DataHistoryJob
	// status = 0 selects jobs that have not yet completed.
	query := postgres.Datahistoryjobs(
		qm.Load(postgres.DatahistoryjobRels.ExchangeName),
		qm.Load(postgres.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Where("status = ?", 0))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return jobs, err
	}
	for i := range results {
		// Convert each eager-loaded result row into the DTO form.
		var jobResults []*datahistoryjobresult.DataHistoryJobResult
		for j := range results[i].R.JobDatahistoryjobresults {
			jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
				ID:                results[i].R.JobDatahistoryjobresults[j].ID,
				JobID:             results[i].R.JobDatahistoryjobresults[j].JobID,
				IntervalStartDate: results[i].R.JobDatahistoryjobresults[j].IntervalStartTime,
				IntervalEndDate:   results[i].R.JobDatahistoryjobresults[j].IntervalEndTime,
				Status:            int64(results[i].R.JobDatahistoryjobresults[j].Status),
				Result:            results[i].R.JobDatahistoryjobresults[j].Result.String,
				Date:              results[i].R.JobDatahistoryjobresults[j].RunTime,
			})
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               results[i].ID,
			Nickname:         results[i].Nickname,
			ExchangeID:       results[i].ExchangeNameID,
			ExchangeName:     results[i].R.ExchangeName.Name,
			Asset:            results[i].Asset,
			Base:             results[i].Base,
			Quote:            results[i].Quote,
			StartDate:        results[i].StartTime,
			EndDate:          results[i].EndTime,
			Interval:         int64(results[i].Interval),
			BatchSize:        int64(results[i].BatchCount),
			RequestSizeLimit: int64(results[i].RequestSize),
			DataType:         int64(results[i].DataType),
			MaxRetryAttempts: int64(results[i].MaxRetries),
			Status:           int64(results[i].Status),
			CreatedDate:      results[i].Created,
			Results:          jobResults,
		})
	}
	return jobs, nil
}

View File

@@ -0,0 +1,212 @@
package datahistoryjob
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"testing"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/database/testhelpers"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
)
var (
	// verbose toggles extended test output via testhelpers.
	verbose = false
	// testExchanges are seeded into the database before tests run; their
	// UUIDs are filled in by seedDB.
	testExchanges = []exchange.Details{
		{
			Name: "one",
		},
		{
			Name: "two",
		},
	}
)
// TestMain prepares the test database configuration and a temporary
// directory before running the suite, cleaning up afterwards.
func TestMain(m *testing.M) {
	if verbose {
		testhelpers.EnableVerboseTestOutput()
	}
	testhelpers.PostgresTestDatabase = testhelpers.GetConnectionDetails()
	tempDir, err := ioutil.TempDir("", "gct-temp")
	if err != nil {
		log.Fatal(err)
	}
	testhelpers.TempDir = tempDir
	exitCode := m.Run()
	if err = os.RemoveAll(testhelpers.TempDir); err != nil {
		fmt.Printf("Failed to remove temp db file: %v", err)
	}
	os.Exit(exitCode)
}
// seedDB inserts the test exchanges and records each generated UUID back
// onto the package-level testExchanges slice.
func seedDB() error {
	if err := exchange.InsertMany(testExchanges); err != nil {
		return err
	}
	for i := range testExchanges {
		details, err := exchange.One(testExchanges[i].Name)
		if err != nil {
			return err
		}
		testExchanges[i].UUID = details.UUID
	}
	return nil
}
// TestDataHistoryJob exercises upsert and retrieval of data history jobs
// against each configured database driver.
func TestDataHistoryJob(t *testing.T) {
	testCases := []struct {
		name   string
		config *database.Config
		seedDB func() error
		runner func(t *testing.T)
		closer func(dbConn *database.Instance) error
	}{
		{
			name:   "postgresql",
			config: testhelpers.PostgresTestDatabase,
			seedDB: seedDB,
		},
		{
			name: "SQLite",
			config: &database.Config{
				Driver:            database.DBSQLite3,
				ConnectionDetails: drivers.ConnectionDetails{Database: "./testdb"},
			},
			seedDB: seedDB,
		},
	}
	for x := range testCases {
		test := testCases[x]
		t.Run(test.name, func(t *testing.T) {
			if !testhelpers.CheckValidConfig(&test.config.ConnectionDetails) {
				t.Skip("database not configured skipping test")
			}
			dbConn, err := testhelpers.ConnectToDatabase(test.config)
			if err != nil {
				t.Fatal(err)
			}
			if test.seedDB != nil {
				err = test.seedDB()
				if err != nil {
					t.Error(err)
				}
			}
			db, err := Setup(dbConn)
			if err != nil {
				// use t.Fatal rather than log.Fatal so cleanup and
				// other subtests still run on failure
				t.Fatal(err)
			}
			var jerberinos, jerberoos []*DataHistoryJob
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				jerberinos = append(jerberinos, &DataHistoryJob{
					ID:           uu.String(),
					Nickname:     fmt.Sprintf("TestDataHistoryJob%v", i),
					ExchangeID:   testExchanges[0].UUID.String(),
					ExchangeName: testExchanges[0].Name,
					Asset:        asset.Spot.String(),
					Base:         currency.BTC.String(),
					Quote:        currency.USD.String(),
					StartDate:    time.Now().Add(time.Duration(i) * time.Second),
					EndDate:      time.Now().Add(time.Minute * time.Duration(i)),
					Interval:     int64(i),
				})
			}
			err = db.Upsert(jerberinos...)
			if err != nil {
				t.Fatal(err)
			}
			// insert the same jerbs to test conflict resolution
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				j := &DataHistoryJob{
					ID:           uu.String(),
					Nickname:     fmt.Sprintf("TestDataHistoryJob%v", i),
					ExchangeID:   testExchanges[0].UUID.String(),
					ExchangeName: testExchanges[0].Name,
					Asset:        asset.Spot.String(),
					Base:         currency.BTC.String(),
					Quote:        currency.USD.String(),
					StartDate:    time.Now().Add(time.Duration(i) * time.Second),
					EndDate:      time.Now().Add(time.Minute * time.Duration(i)),
					Interval:     int64(i),
				}
				if i == 19 {
					j.Status = 1
				}
				jerberoos = append(jerberoos, j)
			}
			err = db.Upsert(jerberoos...)
			if err != nil {
				t.Fatal(err)
			}
			_, err = db.GetJobsBetween(time.Now(), time.Now().Add(time.Hour))
			if err != nil {
				t.Fatal(err)
			}
			resp, err := db.GetByNickName("TestDataHistoryJob19")
			if err != nil {
				t.Fatal(err)
			}
			if !strings.EqualFold(resp.Nickname, "TestDataHistoryJob19") {
				t.Fatal("the database no longer functions")
			}
			results, err := db.GetAllIncompleteJobsAndResults()
			if err != nil {
				t.Error(err)
			}
			if len(results) != 19 {
				t.Errorf("expected 19, received %v", len(results))
			}
			// the unexported postgres helper can only run against a
			// postgres connection; previously it was called for the
			// SQLite case too and broke that run
			if test.name == "postgresql" {
				var jerbPG *DataHistoryJob
				jerbPG, err = db.getJobAndAllResultsPostgres(jerberoos[0].Nickname)
				if err != nil {
					t.Fatal(err)
				}
				if !strings.EqualFold(jerbPG.Nickname, jerberoos[0].Nickname) {
					t.Errorf("expected %v, received %v", jerberoos[0].Nickname, jerbPG.Nickname)
				}
			}
			results, err = db.GetJobsBetween(time.Now().Add(-time.Hour), time.Now())
			if err != nil {
				t.Error(err)
			}
			if len(results) != 20 {
				t.Errorf("expected 20, received %v", len(results))
			}
			jerb, err := db.GetJobAndAllResults(jerberoos[0].Nickname)
			if err != nil {
				t.Error(err)
			}
			if !strings.EqualFold(jerb.Nickname, jerberoos[0].Nickname) {
				t.Errorf("expected %v, received %v", jerberoos[0].Nickname, jerb.Nickname)
			}
			err = testhelpers.CloseDatabase(dbConn)
			if err != nil {
				t.Error(err)
			}
		})
	}
}

View File

@@ -0,0 +1,47 @@
package datahistoryjob
import (
"time"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjobresult"
)
// DataHistoryJob is a DTO for database data
type DataHistoryJob struct {
	ID       string
	Nickname string
	// ExchangeID is the UUID of the related exchange row
	ExchangeID   string
	ExchangeName string
	Asset        string
	Base         string
	Quote        string
	// StartDate and EndDate bound the period of data the job covers
	StartDate        time.Time
	EndDate          time.Time
	Interval         int64
	RequestSizeLimit int64
	DataType         int64
	MaxRetryAttempts int64
	BatchSize        int64
	Status           int64
	CreatedDate      time.Time
	// Results holds any job results associated with this job
	Results []*datahistoryjobresult.DataHistoryJobResult
}

// DBService is a service which allows the interaction with
// the database without a direct reference to a global
type DBService struct {
	sql    database.ISQL
	driver string
}

// IDBService allows using data history job database service
// without needing to care about implementation
type IDBService interface {
	Upsert(jobs ...*DataHistoryJob) error
	GetByNickName(nickname string) (*DataHistoryJob, error)
	GetByID(id string) (*DataHistoryJob, error)
	GetJobsBetween(startDate, endDate time.Time) ([]DataHistoryJob, error)
	GetAllIncompleteJobsAndResults() ([]DataHistoryJob, error)
	GetJobAndAllResults(nickname string) (*DataHistoryJob, error)
}

View File

@@ -0,0 +1,280 @@
package datahistoryjobresult
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/models/postgres"
"github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/log"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries/qm"
"github.com/volatiletech/null"
)
// Setup returns a DBService wrapping the supplied database connection.
// An error is returned when no connected database is available; previously
// this returned (nil, nil), which forced every caller to nil-check the
// service or risk a nil pointer dereference.
func Setup(db database.IDatabase) (*DBService, error) {
	if db == nil {
		return nil, database.ErrNoDatabaseProvided
	}
	if !db.IsConnected() {
		return nil, database.ErrNoDatabaseProvided
	}
	cfg := db.GetConfig()
	dbCon, err := db.GetSQL()
	if err != nil {
		return nil, err
	}
	return &DBService{
		sql:    dbCon,
		driver: cfg.Driver,
	}, nil
}
// Upsert inserts or updates job results into the database within a single
// transaction. A no-op when no results are supplied.
func (db *DBService) Upsert(jobs ...*DataHistoryJobResult) error {
	if len(jobs) == 0 {
		return nil
	}
	// Validate the driver before opening a transaction; previously an
	// unknown driver returned while leaving the transaction neither
	// committed nor rolled back.
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite, database.DBPostgreSQL:
	default:
		return database.ErrNoDatabaseProvided
	}
	ctx := context.Background()
	tx, err := db.sql.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginTx %w", err)
	}
	// Roll back on any failure so the connection is not left mid-transaction.
	defer func() {
		if err != nil {
			errRB := tx.Rollback()
			if errRB != nil {
				log.Errorf(log.DatabaseMgr, "Insert tx.Rollback %v", errRB)
			}
		}
	}()
	if db.driver == database.DBPostgreSQL {
		err = upsertPostgres(ctx, tx, jobs...)
	} else {
		err = upsertSqlite(ctx, tx, jobs...)
	}
	if err != nil {
		return err
	}
	return tx.Commit()
}
// GetByJobID returns all results belonging to the supplied job ID,
// dispatching to the driver-specific implementation.
func (db *DBService) GetByJobID(jobID string) ([]DataHistoryJobResult, error) {
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite:
		return db.getByJobIDSQLite(jobID)
	case database.DBPostgreSQL:
		return db.getByJobIDPostgres(jobID)
	default:
		return nil, database.ErrNoDatabaseProvided
	}
}
// GetJobResultsBetween returns all results for a job whose run time falls
// between startDate and endDate, dispatching by database driver.
func (db *DBService) GetJobResultsBetween(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error) {
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite:
		return db.getJobResultsBetweenSQLite(jobID, startDate, endDate)
	case database.DBPostgreSQL:
		return db.getJobResultsBetweenPostgres(jobID, startDate, endDate)
	default:
		return nil, database.ErrNoDatabaseProvided
	}
}
// upsertSqlite writes job results to a SQLite database within the supplied
// transaction, generating a fresh UUID for any result missing an ID.
// NOTE(review): despite the name this performs plain inserts — presumably
// because the generated SQLite model lacks native upsert support; confirm.
func upsertSqlite(ctx context.Context, tx *sql.Tx, results ...*DataHistoryJobResult) error {
	for i := range results {
		if results[i].ID == "" {
			freshUUID, err := uuid.NewV4()
			if err != nil {
				return err
			}
			results[i].ID = freshUUID.String()
		}
		// SQLite stores timestamps as RFC3339 strings.
		var tempEvent = sqlite3.Datahistoryjobresult{
			ID:                results[i].ID,
			JobID:             results[i].JobID,
			Result:            null.NewString(results[i].Result, results[i].Result != ""),
			Status:            float64(results[i].Status),
			IntervalStartTime: results[i].IntervalStartDate.UTC().Format(time.RFC3339),
			IntervalEndTime:   results[i].IntervalEndDate.UTC().Format(time.RFC3339),
			RunTime:           results[i].Date.UTC().Format(time.RFC3339),
		}
		err := tempEvent.Insert(ctx, tx, boil.Infer())
		if err != nil {
			return err
		}
	}
	return nil
}
// upsertPostgres upserts job results into a PostgreSQL database within the
// supplied transaction, generating a fresh UUID for any result missing an ID.
func upsertPostgres(ctx context.Context, tx *sql.Tx, results ...*DataHistoryJobResult) error {
	var err error
	for i := range results {
		if results[i].ID == "" {
			var freshUUID uuid.UUID
			freshUUID, err = uuid.NewV4()
			if err != nil {
				return err
			}
			results[i].ID = freshUUID.String()
		}
		var tempEvent = postgres.Datahistoryjobresult{
			ID:                results[i].ID,
			JobID:             results[i].JobID,
			Result:            null.NewString(results[i].Result, results[i].Result != ""),
			Status:            float64(results[i].Status),
			IntervalStartTime: results[i].IntervalStartDate.UTC(),
			IntervalEndTime:   results[i].IntervalEndDate.UTC(),
			RunTime:           results[i].Date.UTC(),
		}
		// true enables update-on-conflict so re-submitted results replace
		// existing rows rather than erroring
		err = tempEvent.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer())
		if err != nil {
			return err
		}
	}
	return nil
}
// getByJobIDSQLite returns all results for jobID from a SQLite database,
// parsing the stored RFC3339 time strings back into time.Time values.
func (db *DBService) getByJobIDSQLite(jobID string) ([]DataHistoryJobResult, error) {
	query := sqlite3.Datahistoryjobresults(qm.Where("job_id = ?", jobID))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	var resp []DataHistoryJobResult
	for i := range results {
		var start, end, run time.Time
		start, err = time.Parse(time.RFC3339, results[i].IntervalStartTime)
		if err != nil {
			return nil, err
		}
		end, err = time.Parse(time.RFC3339, results[i].IntervalEndTime)
		if err != nil {
			return nil, err
		}
		run, err = time.Parse(time.RFC3339, results[i].RunTime)
		if err != nil {
			return nil, err
		}
		resp = append(resp, DataHistoryJobResult{
			ID:                results[i].ID,
			JobID:             results[i].JobID,
			IntervalStartDate: start,
			IntervalEndDate:   end,
			Status:            int64(results[i].Status),
			Result:            results[i].Result.String,
			Date:              run,
		})
	}
	return resp, nil
}
// getByJobIDPostgres returns all results for jobID from a PostgreSQL
// database; timestamps are stored natively so no parsing is required.
func (db *DBService) getByJobIDPostgres(jobID string) ([]DataHistoryJobResult, error) {
	rows, err := postgres.Datahistoryjobresults(qm.Where("job_id = ?", jobID)).All(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	var response []DataHistoryJobResult
	for i := range rows {
		response = append(response, DataHistoryJobResult{
			ID:                rows[i].ID,
			JobID:             rows[i].JobID,
			IntervalStartDate: rows[i].IntervalStartTime,
			IntervalEndDate:   rows[i].IntervalEndTime,
			Status:            int64(rows[i].Status),
			Result:            rows[i].Result.String,
			Date:              rows[i].RunTime,
		})
	}
	return response, nil
}
// getJobResultsBetweenSQLite returns all results for jobID whose run time
// falls between startDate and endDate from a SQLite database. Bounds are
// formatted as RFC3339 strings to match how SQLite stores run_time.
func (db *DBService) getJobResultsBetweenSQLite(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error) {
	var results []DataHistoryJobResult
	query := sqlite3.Datahistoryjobresults(qm.Where("job_id = ? AND run_time BETWEEN ? AND ? ", jobID, startDate.UTC().Format(time.RFC3339), endDate.UTC().Format(time.RFC3339)))
	resp, err := query.All(context.Background(), db.sql)
	if err != nil {
		return results, err
	}
	for i := range resp {
		// Parse the stored RFC3339 strings back into time.Time values.
		var start, end, run time.Time
		start, err = time.Parse(time.RFC3339, resp[i].IntervalStartTime)
		if err != nil {
			return nil, err
		}
		end, err = time.Parse(time.RFC3339, resp[i].IntervalEndTime)
		if err != nil {
			return nil, err
		}
		run, err = time.Parse(time.RFC3339, resp[i].RunTime)
		if err != nil {
			return nil, err
		}
		results = append(results, DataHistoryJobResult{
			ID:                resp[i].ID,
			JobID:             resp[i].JobID,
			IntervalStartDate: start,
			IntervalEndDate:   end,
			Status:            int64(resp[i].Status),
			Result:            resp[i].Result.String,
			Date:              run,
		})
	}
	return results, nil
}
// getJobResultsBetweenPostgres returns all results for jobID whose run time
// falls between startDate and endDate from a PostgreSQL database.
func (db *DBService) getJobResultsBetweenPostgres(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error) {
	rows, err := postgres.Datahistoryjobresults(
		qm.Where("job_id = ? AND run_time BETWEEN ? AND ? ", jobID, startDate, endDate)).
		All(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	var response []DataHistoryJobResult
	for i := range rows {
		response = append(response, DataHistoryJobResult{
			ID:                rows[i].ID,
			JobID:             rows[i].JobID,
			IntervalStartDate: rows[i].IntervalStartTime,
			IntervalEndDate:   rows[i].IntervalEndTime,
			Status:            int64(rows[i].Status),
			Result:            rows[i].Result.String,
			Date:              rows[i].RunTime,
		})
	}
	return response, nil
}

View File

@@ -0,0 +1,200 @@
package datahistoryjobresult
import (
"database/sql"
"fmt"
"io/ioutil"
"log"
"os"
"testing"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/database/testhelpers"
)
var (
	// verbose toggles extended test output via testhelpers.
	verbose = false
	// testExchanges are seeded into the database before tests run; their
	// UUIDs are filled in by seedDB.
	testExchanges = []exchange.Details{
		{
			Name: "one",
		},
		{
			Name: "two",
		},
	}
)
// TestMain prepares the test database configuration and a temporary
// directory before running the suite, cleaning up afterwards.
func TestMain(m *testing.M) {
	if verbose {
		testhelpers.EnableVerboseTestOutput()
	}
	testhelpers.PostgresTestDatabase = testhelpers.GetConnectionDetails()
	tempDir, err := ioutil.TempDir("", "gct-temp")
	if err != nil {
		log.Fatal(err)
	}
	testhelpers.TempDir = tempDir
	exitCode := m.Run()
	if err = os.RemoveAll(testhelpers.TempDir); err != nil {
		fmt.Printf("Failed to remove temp db file: %v", err)
	}
	os.Exit(exitCode)
}
// seedDB inserts the test exchanges and records each generated UUID back
// onto the package-level testExchanges slice.
func seedDB() error {
	if err := exchange.InsertMany(testExchanges); err != nil {
		return err
	}
	for i := range testExchanges {
		details, err := exchange.One(testExchanges[i].Name)
		if err != nil {
			return err
		}
		testExchanges[i].UUID = details.UUID
	}
	return nil
}
// TestDataHistoryJob exercises upsert and retrieval of data history job
// results against each configured database driver.
func TestDataHistoryJob(t *testing.T) {
	testCases := []struct {
		name   string
		config *database.Config
		seedDB func() error
		runner func(t *testing.T)
		closer func(dbConn *database.Instance) error
	}{
		{
			name:   "postgresql",
			config: testhelpers.PostgresTestDatabase,
			seedDB: seedDB,
		},
		{
			name: "SQLite",
			config: &database.Config{
				Driver:            database.DBSQLite3,
				ConnectionDetails: drivers.ConnectionDetails{Database: "./testdb"},
			},
			seedDB: seedDB,
		},
	}
	for x := range testCases {
		test := testCases[x]
		t.Run(test.name, func(t *testing.T) {
			if !testhelpers.CheckValidConfig(&test.config.ConnectionDetails) {
				t.Skip("database not configured skipping test")
			}
			dbConn, err := testhelpers.ConnectToDatabase(test.config)
			if err != nil {
				t.Fatal(err)
			}
			if test.seedDB != nil {
				err = test.seedDB()
				if err != nil {
					t.Error(err)
				}
			}
			db, err := Setup(dbConn)
			if err != nil {
				t.Fatal(err)
			}
			// postgres requires job for tests to function
			var id string
			if test.name == "postgresql" {
				var selectID *sql.Rows
				selectID, err = db.sql.Query("select id from datahistoryjob where nickname = 'testdatahistoryjob1'")
				if err != nil {
					t.Fatal(err)
				}
				defer func() {
					// use t.Error in deferred cleanup so the remaining
					// teardown still runs if closing fails
					if errC := selectID.Close(); errC != nil {
						t.Error(errC)
					}
					if selectID.Err() != nil {
						t.Error(selectID.Err())
					}
				}()
				// guard against an empty result set, which previously
				// surfaced only as an opaque Scan error
				if !selectID.Next() {
					t.Fatal("no datahistoryjob row found; seed job data before running result tests")
				}
				err = selectID.Scan(&id)
				if err != nil {
					t.Error(err)
				}
			}
			var resulterinos, resultaroos []*DataHistoryJobResult
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				resulterinos = append(resulterinos, &DataHistoryJobResult{
					ID:                uu.String(),
					JobID:             id,
					IntervalStartDate: time.Now(),
					IntervalEndDate:   time.Now().Add(time.Second),
					Status:            0,
					Result:            "Yay",
					Date:              time.Now(),
				})
			}
			err = db.Upsert(resulterinos...)
			if err != nil {
				t.Fatal(err)
			}
			// insert the same results to test conflict resolution
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				j := &DataHistoryJobResult{
					ID:                uu.String(),
					JobID:             id,
					IntervalStartDate: time.Now(),
					IntervalEndDate:   time.Now().Add(time.Second),
					Status:            0,
					Result:            "Wow",
					Date:              time.Now(),
				}
				if i == 19 {
					j.Status = 1
					j.Date = time.Now().Add(time.Hour * 24)
				}
				resultaroos = append(resultaroos, j)
			}
			err = db.Upsert(resultaroos...)
			if err != nil {
				t.Fatal(err)
			}
			results, err := db.GetByJobID(id)
			if err != nil {
				t.Fatal(err)
			}
			if len(results) == 0 {
				t.Error("expected job results")
			}
			results, err = db.GetJobResultsBetween(id, time.Now().Add(time.Hour*23), time.Now().Add(time.Hour*25))
			if err != nil {
				t.Fatal(err)
			}
			if len(results) == 0 {
				t.Errorf("expected job result, received %v", len(results))
			}
			err = testhelpers.CloseDatabase(dbConn)
			if err != nil {
				t.Error(err)
			}
		})
	}
}

View File

@@ -0,0 +1,33 @@
package datahistoryjobresult
import (
"time"
"github.com/thrasher-corp/gocryptotrader/database"
)
// DataHistoryJobResult is a DTO for database data
type DataHistoryJobResult struct {
	ID string
	// JobID links the result to its parent data history job
	JobID string
	// IntervalStartDate and IntervalEndDate bound the interval this
	// result covers
	IntervalStartDate time.Time
	IntervalEndDate   time.Time
	Status            int64
	Result            string
	// Date is the time the result was recorded (stored as run_time)
	Date time.Time
}

// DBService is a service which allows the interaction with
// the database without a direct reference to a global
type DBService struct {
	sql    database.ISQL
	driver string
}

// IDBService allows using data history job result database service
// without needing to care about implementation
type IDBService interface {
	Upsert(jobs ...*DataHistoryJobResult) error
	GetByJobID(jobID string) ([]DataHistoryJobResult, error)
	GetJobResultsBetween(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error)
}

View File

@@ -8,7 +8,7 @@ import (
)
var (
exchangeCache = cache.New(10)
exchangeCache = cache.New(30)
// ErrNoExchangeFound is a basic predefined error
ErrNoExchangeFound = errors.New("exchange not found")
)

View File

@@ -32,9 +32,10 @@ func TestGetSQLDialect(t *testing.T) {
test := testCases[x]
t.Run(test.driver, func(t *testing.T) {
err := database.DB.SetConfig(&database.Config{
cfg := &database.Config{
Driver: test.driver,
})
}
err := database.DB.SetConfig(cfg)
if err != nil {
t.Error(err)
}

View File

@@ -10,10 +10,11 @@ import (
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/database"
modelPSQL "github.com/thrasher-corp/gocryptotrader/database/models/postgres"
modelSQLite "github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/models/postgres"
"github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/repository"
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/exchanges/kline"
"github.com/thrasher-corp/gocryptotrader/log"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries/qm"
@@ -61,6 +62,89 @@ func Insert(trades ...Data) error {
return tx.Commit()
}
// VerifyTradeInIntervals will query for ONE trade within each kline interval and verify if data exists
// if it does, it will set the range holder property "HasData" to true
func VerifyTradeInIntervals(exchangeName, assetType, base, quote string, irh *kline.IntervalRangeHolder) error {
	ctx := boil.SkipTimestamps(context.Background())
	tx, err := database.DB.SQL.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginTx %w", err)
	}
	// Roll back on any failure so the connection is not left mid-transaction.
	defer func() {
		if err != nil {
			if errRB := tx.Rollback(); errRB != nil {
				log.Errorf(log.DatabaseMgr, "Insert tx.Rollback %v", errRB)
			}
		}
	}()
	switch repository.GetSQLDialect() {
	case database.DBSQLite3, database.DBSQLite:
		err = verifyTradeInIntervalsSqlite(ctx, tx, exchangeName, assetType, base, quote, irh)
	default:
		err = verifyTradeInIntervalsPostgres(ctx, tx, exchangeName, assetType, base, quote, irh)
	}
	if err != nil {
		return err
	}
	return tx.Commit()
}
// verifyTradeInIntervalsSqlite checks each interval in irh for at least one
// matching trade in a SQLite database and sets HasData accordingly.
func verifyTradeInIntervalsSqlite(ctx context.Context, tx *sql.Tx, exchangeName, assetType, base, quote string, irh *kline.IntervalRangeHolder) error {
	exch, err := sqlite3.Exchanges(qm.Where("name = ?", exchangeName)).One(ctx, tx)
	if err != nil {
		return err
	}
	for i := range irh.Ranges {
		for j := range irh.Ranges[i].Intervals {
			result, err := sqlite3.Trades(qm.Where("exchange_name_id = ? AND asset = ? AND base = ? AND quote = ? AND timestamp between ? AND ?",
				exch.ID,
				assetType,
				base,
				quote,
				irh.Ranges[i].Intervals[j].Start.Time.UTC().Format(time.RFC3339),
				irh.Ranges[i].Intervals[j].End.Time.UTC().Format(time.RFC3339))).One(ctx, tx)
			if err != nil {
				// An interval with no trades is not an error; it simply
				// has no data. Previously sql.ErrNoRows aborted the whole
				// verification at the first empty interval.
				if err == sql.ErrNoRows {
					continue
				}
				return err
			}
			if result != nil {
				irh.Ranges[i].Intervals[j].HasData = true
			}
		}
	}
	return nil
}
// verifyTradeInIntervalsPostgres checks each interval in irh for at least
// one matching trade in a PostgreSQL database and sets HasData accordingly.
func verifyTradeInIntervalsPostgres(ctx context.Context, tx *sql.Tx, exchangeName, assetType, base, quote string, irh *kline.IntervalRangeHolder) error {
	exch, err := postgres.Exchanges(qm.Where("name = ?", exchangeName)).One(ctx, tx)
	if err != nil {
		return err
	}
	for i := range irh.Ranges {
		for j := range irh.Ranges[i].Intervals {
			// the missing "AND" before "timestamp" previously made this
			// query invalid SQL
			result, err := postgres.Trades(qm.Where("exchange_name_id = ? AND asset = ? AND base = ? AND quote = ? AND timestamp between ? AND ?",
				exch.ID,
				assetType,
				base,
				quote,
				irh.Ranges[i].Intervals[j].Start.Time.UTC().Format(time.RFC3339),
				irh.Ranges[i].Intervals[j].End.Time.UTC().Format(time.RFC3339))).One(ctx, tx)
			if err != nil {
				// An interval with no trades is not an error; it simply
				// has no data.
				if err == sql.ErrNoRows {
					continue
				}
				return err
			}
			if result != nil {
				irh.Ranges[i].Intervals[j].HasData = true
			}
		}
	}
	return nil
}
func insertSQLite(ctx context.Context, tx *sql.Tx, trades ...Data) error {
for i := range trades {
if trades[i].ID == "" {
@@ -70,7 +154,7 @@ func insertSQLite(ctx context.Context, tx *sql.Tx, trades ...Data) error {
}
trades[i].ID = freshUUID.String()
}
var tempEvent = modelSQLite.Trade{
var tempEvent = sqlite3.Trade{
ID: trades[i].ID,
ExchangeNameID: trades[i].ExchangeNameID,
Base: strings.ToUpper(trades[i].Base),
@@ -106,7 +190,7 @@ func insertPostgres(ctx context.Context, tx *sql.Tx, trades ...Data) error {
}
trades[i].ID = freshUUID.String()
}
var tempEvent = modelPSQL.Trade{
var tempEvent = postgres.Trade{
ExchangeNameID: trades[i].ExchangeNameID,
Base: strings.ToUpper(trades[i].Base),
Quote: strings.ToUpper(trades[i].Quote),
@@ -152,7 +236,7 @@ func GetByUUID(uuid string) (td Data, err error) {
func getByUUIDSQLite(uuid string) (Data, error) {
var td Data
var ts time.Time
query := modelSQLite.Trades(qm.Where("id = ?", uuid))
query := sqlite3.Trades(qm.Where("id = ?", uuid))
result, err := query.One(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -179,8 +263,8 @@ func getByUUIDSQLite(uuid string) (Data, error) {
}
func getByUUIDPostgres(uuid string) (td Data, err error) {
query := modelPSQL.Trades(qm.Where("id = ?", uuid))
var result *modelPSQL.Trade
query := postgres.Trades(qm.Where("id = ?", uuid))
var result *postgres.Trade
result, err = query.One(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -232,8 +316,8 @@ func getInRangeSQLite(exchangeName, assetType, base, quote string, startDate, en
"quote": strings.ToUpper(quote),
}
q := generateQuery(wheres, startDate, endDate)
query := modelSQLite.Trades(q...)
var result []*modelSQLite.Trade
query := sqlite3.Trades(q...)
var result []*sqlite3.Trade
result, err = query.All(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -274,8 +358,8 @@ func getInRangePostgres(exchangeName, assetType, base, quote string, startDate,
"quote": strings.ToUpper(quote),
}
q := generateQuery(wheres, startDate, endDate)
query := modelPSQL.Trades(q...)
var result []*modelPSQL.Trade
query := postgres.Trades(q...)
var result []*postgres.Trade
result, err = query.All(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -333,7 +417,7 @@ func deleteTradesSQLite(ctx context.Context, tx *sql.Tx, trades ...Data) error {
for i := range trades {
tradeIDs = append(tradeIDs, trades[i].ID)
}
query := modelSQLite.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
query := sqlite3.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
_, err := query.DeleteAll(ctx, tx)
return err
}
@@ -343,7 +427,7 @@ func deleteTradesPostgres(ctx context.Context, tx *sql.Tx, trades ...Data) error
for i := range trades {
tradeIDs = append(tradeIDs, trades[i].ID)
}
query := modelPSQL.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
query := postgres.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
_, err := query.DeleteAll(ctx, tx)
return err
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/database/testhelpers"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/kline"
"github.com/thrasher-corp/gocryptotrader/exchanges/order"
)
@@ -97,12 +98,12 @@ func TestTrades(t *testing.T) {
func tradeSQLTester(t *testing.T) {
var trades, trades2 []Data
firstTime := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
for i := 0; i < 20; i++ {
uu, _ := uuid.NewV4()
trades = append(trades, Data{
ID: uu.String(),
Timestamp: time.Now(),
Timestamp: firstTime.Add(time.Minute * time.Duration(i)),
Exchange: testExchanges[0].Name,
Base: currency.BTC.String(),
Quote: currency.USD.String(),
@@ -122,7 +123,7 @@ func tradeSQLTester(t *testing.T) {
uu, _ := uuid.NewV4()
trades2 = append(trades2, Data{
ID: uu.String(),
Timestamp: time.Now(),
Timestamp: firstTime.Add(time.Minute * time.Duration(i)),
Exchange: testExchanges[0].Name,
Base: currency.BTC.String(),
Quote: currency.USD.String(),
@@ -142,8 +143,8 @@ func tradeSQLTester(t *testing.T) {
asset.Spot.String(),
currency.BTC.String(),
currency.USD.String(),
time.Now().Add(-time.Hour),
time.Now().Add(time.Hour),
firstTime.Add(-time.Hour),
firstTime.Add(time.Hour),
)
if err != nil {
t.Error(err)
@@ -157,8 +158,8 @@ func tradeSQLTester(t *testing.T) {
asset.Spot.String(),
currency.BTC.String(),
currency.USD.String(),
time.Now().Add(-time.Hour),
time.Now().Add(time.Hour))
firstTime.Add(-time.Hour),
firstTime.Add(time.Hour))
if err != nil {
t.Error(err)
}
@@ -166,6 +167,24 @@ func tradeSQLTester(t *testing.T) {
t.Error("Bad get!")
}
ranges, err := kline.CalculateCandleDateRanges(firstTime, firstTime.Add(20*time.Minute), kline.OneMin, 100)
if err != nil {
t.Error(err)
}
err = VerifyTradeInIntervals(testExchanges[0].Name,
asset.Spot.String(),
currency.BTC.String(),
currency.USD.String(),
ranges)
if err != nil {
t.Error(err)
}
if !ranges.HasDataAtDate(firstTime) {
t.Error("expected data")
}
err = DeleteTrades(trades...)
if err != nil {
t.Error(err)

View File

@@ -80,13 +80,13 @@ func ConnectToDatabase(conn *database.Config) (dbConn *database.Instance, err er
return nil, err
}
if conn.Driver == database.DBPostgreSQL {
dbConn, err = psqlConn.Connect()
dbConn, err = psqlConn.Connect(conn)
if err != nil {
return nil, err
}
} else if conn.Driver == database.DBSQLite3 || conn.Driver == database.DBSQLite {
database.DB.DataPath = TempDir
dbConn, err = sqliteConn.Connect()
dbConn, err = sqliteConn.Connect(conn.Database)
if err != nil {
return nil, err
}
@@ -96,7 +96,7 @@ func ConnectToDatabase(conn *database.Config) (dbConn *database.Instance, err er
if err != nil {
return nil, err
}
database.DB.SetConnected(true)
return
}