mirror of
https://github.com/d0zingcat/gocryptotrader.git
synced 2026-05-16 15:09:57 +00:00
* Adds lovely initial concept for historical data doer
* Adds ability to save tasks. Adds config. Adds startStop to engine
* Has a database microservice without use of globals! Further infrastructure design. Adds readme
* Commentary to help design
* Adds migrations for database
* readme and adds database models
* Some modelling that doesn't work end of day
* Completes datahistoryjob sql.Begins datahistoryjobresult
* Adds datahistoryjob functions to retrieve job results. Adapts subsystem
* Adds process for upserting jobs and job results to the database
* Broken end of day weird sqlboiler crap
* Fixes issue with SQL generation.
* RPC generation and addition of basic upsert command
* Renames types
* Adds rpc functions
* quick commit before context switch. Exchanges aren't being populated
* Begin the tests!
* complete sql tests. stop failed jobs. CLI command creation
* Defines rpc commands
* Fleshes out RPC implementation
* Expands testing
* Expands testing, removes double remove
* Adds coverage of data history subsystem, expands errors and nil checks
* Minor logic improvement
* streamlines datahistory test setup
* End of day minor linting
* Lint, convert simplify, rpc expansion, type expansion, readme expansion
* Documentation update
* Renames for consistency
* Completes RPC server commands
* Fixes tests
* Speeds up testing by reducing unnecessary actions. Adds maxjobspercycle config
* Comments for everything
* Adds missing result string. checks interval supported. default start end cli
* Fixes ID problem. Improves binance trade fetch. job ranges are processed
* adds dbservice coverage. adds rpcserver coverage
* docs regen, uses dbcon interface, reverts binance, fixes races, toggle manager
* Speed up tests, remove bad global usage, fix uuid check
* Adds verbose. Updates docs. Fixes postgres
* Minor changes to logging and start stop
* Fixes postgres db tests, fixes postgres column typo
* Fixes old string typo,removes constraint,error parsing for nonreaders
* prevents dhm running when table doesn't exist. Adds prereq documentation
* Adds parallel, rmlines, err fix, comment fix, minor param fixes
* doc regen, common time range check and test updating
* Fixes job validation issues. Updates candle range checker.
* Ensures test cannot fail due to time.Now() shenanigans
* Fixes oopsie, adds documentation and a warn
* Fixes another time test, adjusts copy
* Drastically speeds up data history manager tests via function overrides
* Fixes summary bug and better logs
* Fixes local time test, fixes websocket tests
* removes defaults and comment,updates error messages,sets cli command args
* Fixes FTX trade processing
* Fixes issue where jobs got stuck if data wasn't returned but retrieval was successful
* Improves test speed. Simplifies trade verification SQL. Adds command help
* Fixes the oopsies
* Fixes use of query within transaction. Fixes trade err
* oopsie, not needed
* Adds missing data status. Properly ends job even when data is missing
* errors are more verbose and so have more words to describe them
* Doc regen for new status
* tiny test tinkering
* str := string("Removes .String()").String()
* Merge fixups
* Fixes a data race discovered during github actions
* Allows websocket test to pass consistently
* Fixes merge issue preventing datahistorymanager from starting via config
* Niterinos cmd defaults and explanations
* fixes default oopsie
* Fixes lack of nil protection
* Additional oopsie
* More detailed error for validating job exchange
794 lines
23 KiB
Go
794 lines
23 KiB
Go
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
|
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
|
|
|
package sqlite3
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"reflect"
|
|
"testing"
|
|
|
|
"github.com/thrasher-corp/sqlboiler/boil"
|
|
"github.com/thrasher-corp/sqlboiler/queries"
|
|
"github.com/thrasher-corp/sqlboiler/randomize"
|
|
"github.com/thrasher-corp/sqlboiler/strmangle"
|
|
)
|
|
|
|
var (
	// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
	// so force a package dependency in case they don't.
	_ = queries.Equal
)
|
|
|
|
// testDatahistoryjobresults verifies the Datahistoryjobresults starter query
// is constructed with a non-nil underlying query.
func testDatahistoryjobresults(t *testing.T) {
	t.Parallel()

	query := Datahistoryjobresults()

	if query.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
|
|
|
|
// testDatahistoryjobresultsDelete inserts one randomized row inside a
// transaction (always rolled back on exit), deletes it via o.Delete, and
// checks exactly one row was affected and the table is empty afterwards.
func testDatahistoryjobresultsDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Roll back unconditionally so the test never mutates the database.
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := o.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
|
|
|
// testDatahistoryjobresultsQueryDeleteAll inserts one randomized row inside a
// rolled-back transaction, deletes it via the query-level DeleteAll, and
// checks exactly one row was affected and the table is empty afterwards.
func testDatahistoryjobresultsQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := Datahistoryjobresults().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
|
|
|
// testDatahistoryjobresultsSliceDeleteAll inserts one randomized row inside a
// rolled-back transaction, deletes it via the slice-level DeleteAll, and
// checks exactly one row was affected and the table is empty afterwards.
func testDatahistoryjobresultsSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := DatahistoryjobresultSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
|
|
|
// testDatahistoryjobresultsExists inserts one randomized row inside a
// rolled-back transaction and checks DatahistoryjobresultExists reports true
// for the inserted row's primary key.
func testDatahistoryjobresultsExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := DatahistoryjobresultExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if Datahistoryjobresult exists: %s", err)
	}
	if !e {
		t.Errorf("Expected DatahistoryjobresultExists to return true, but got false.")
	}
}
|
|
|
|
// testDatahistoryjobresultsFind inserts one randomized row inside a
// rolled-back transaction and checks FindDatahistoryjobresult returns a
// non-nil record for the inserted row's primary key.
func testDatahistoryjobresultsFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	datahistoryjobresultFound, err := FindDatahistoryjobresult(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if datahistoryjobresultFound == nil {
		t.Error("want a record, got nil")
	}
}
|
|
|
|
// testDatahistoryjobresultsBind inserts one randomized row inside a
// rolled-back transaction and checks the query result can be bound back onto
// a Datahistoryjobresult struct without error.
func testDatahistoryjobresultsBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = Datahistoryjobresults().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
|
|
|
|
// testDatahistoryjobresultsOne inserts one randomized row inside a
// rolled-back transaction and checks One returns a non-nil record.
func testDatahistoryjobresultsOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := Datahistoryjobresults().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
|
|
|
|
// testDatahistoryjobresultsAll inserts two randomized rows inside a
// rolled-back transaction and checks All returns exactly two records.
func testDatahistoryjobresultsAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	datahistoryjobresultOne := &Datahistoryjobresult{}
	datahistoryjobresultTwo := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := Datahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
|
|
|
|
// testDatahistoryjobresultsCount inserts two randomized rows inside a
// rolled-back transaction and checks Count reports exactly two records.
func testDatahistoryjobresultsCount(t *testing.T) {
	t.Parallel()

	var err error
	seed := randomize.NewSeed()
	datahistoryjobresultOne := &Datahistoryjobresult{}
	datahistoryjobresultTwo := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
|
|
|
|
// datahistoryjobresultBeforeInsertHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultAfterInsertHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultAfterSelectHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultBeforeUpdateHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultAfterUpdateHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultBeforeDeleteHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultAfterDeleteHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultBeforeUpsertHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// datahistoryjobresultAfterUpsertHook zeroes o so the hooks test can detect
// the hook fired; ctx and e are unused.
func datahistoryjobresultAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
	*o = Datahistoryjobresult{}
	return nil
}
|
|
|
|
// testDatahistoryjobresultsHooks registers each hook in turn, invokes the
// corresponding doXxxHooks dispatcher directly (no executor needed — the
// hooks ignore it), and asserts the hook zeroed the object. Each hook slice
// is emptied afterwards so the registrations don't leak into other tests.
func testDatahistoryjobresultsHooks(t *testing.T) {
	t.Parallel()

	var err error

	ctx := context.Background()
	// empty is the zero value the hooks are expected to produce.
	empty := &Datahistoryjobresult{}
	o := &Datahistoryjobresult{}

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, false); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult object: %s", err)
	}

	AddDatahistoryjobresultHook(boil.BeforeInsertHook, datahistoryjobresultBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeInsertHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterInsertHook, datahistoryjobresultAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterInsertHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterSelectHook, datahistoryjobresultAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterSelectHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.BeforeUpdateHook, datahistoryjobresultBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeUpdateHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterUpdateHook, datahistoryjobresultAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterUpdateHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.BeforeDeleteHook, datahistoryjobresultBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeDeleteHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterDeleteHook, datahistoryjobresultAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterDeleteHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.BeforeUpsertHook, datahistoryjobresultBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultBeforeUpsertHooks = []DatahistoryjobresultHook{}

	AddDatahistoryjobresultHook(boil.AfterUpsertHook, datahistoryjobresultAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobresultAfterUpsertHooks = []DatahistoryjobresultHook{}
}
|
|
|
|
// testDatahistoryjobresultsInsert inserts one randomized row inside a
// rolled-back transaction using inferred columns and checks Count reports
// exactly one record.
func testDatahistoryjobresultsInsert(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
|
|
|
|
// testDatahistoryjobresultsInsertWhitelist inserts one randomized row inside
// a rolled-back transaction using an explicit whitelist of the non-default
// columns and checks Count reports exactly one record.
func testDatahistoryjobresultsInsertWhitelist(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Whitelist(datahistoryjobresultColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
|
|
|
|
// testDatahistoryjobresultToOneDatahistoryjobUsingJob exercises the
// result -> job foreign-key relationship: Job() must find the parent row,
// and LoadJob must eager-load it through both the slice and the singular
// code paths.
func testDatahistoryjobresultToOneDatahistoryjobUsingJob(t *testing.T) {
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	var local Datahistoryjobresult
	var foreign Datahistoryjob

	seed := randomize.NewSeed()
	if err := randomize.Struct(seed, &local, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err := randomize.Struct(seed, &foreign, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}

	if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}

	// Point the child at the freshly inserted parent before inserting it.
	local.JobID = foreign.ID
	if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}

	check, err := local.Job().One(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}

	if check.ID != foreign.ID {
		t.Errorf("want: %v, got %v", foreign.ID, check.ID)
	}

	// Eager load via the slice path.
	slice := DatahistoryjobresultSlice{&local}
	if err = local.L.LoadJob(ctx, tx, false, (*[]*Datahistoryjobresult)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if local.R.Job == nil {
		t.Error("struct should have been eager loaded")
	}

	// Reset and eager load via the singular path.
	local.R.Job = nil
	if err = local.L.LoadJob(ctx, tx, true, &local, nil); err != nil {
		t.Fatal(err)
	}
	if local.R.Job == nil {
		t.Error("struct should have been eager loaded")
	}
}
|
|
|
|
// testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob verifies SetJob:
// for each of two jobs (the second not yet inserted — SetJob with
// insert=true must insert it) it checks the relationship structs on both
// sides are wired, the foreign key column is set, and that the FK value
// survives a Reload after being zeroed in memory.
func testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob(t *testing.T) {
	var err error

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	var a Datahistoryjobresult
	var b, c Datahistoryjob

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}

	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// Note: c is deliberately NOT inserted here; SetJob inserts it below.
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}

	for i, x := range []*Datahistoryjob{&b, &c} {
		// i != 0 asks SetJob to insert the related row first (true for c).
		err = a.SetJob(ctx, tx, i != 0, x)
		if err != nil {
			t.Fatal(err)
		}

		if a.R.Job != x {
			t.Error("relationship struct not set to correct value")
		}

		if x.R.JobDatahistoryjobresults[0] != &a {
			t.Error("failed to append to foreign relationship struct")
		}
		if a.JobID != x.ID {
			t.Error("foreign key was wrong value", a.JobID)
		}

		// Zero the FK in memory, then Reload must restore it from the DB.
		zero := reflect.Zero(reflect.TypeOf(a.JobID))
		reflect.Indirect(reflect.ValueOf(&a.JobID)).Set(zero)

		if err = a.Reload(ctx, tx); err != nil {
			t.Fatal("failed to reload", err)
		}

		if a.JobID != x.ID {
			t.Error("foreign key was wrong value", a.JobID, x.ID)
		}
	}
}
|
|
|
|
// testDatahistoryjobresultsReload inserts one randomized row inside a
// rolled-back transaction and checks Reload succeeds for that row.
func testDatahistoryjobresultsReload(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = o.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
|
|
|
|
// testDatahistoryjobresultsReloadAll inserts one randomized row inside a
// rolled-back transaction and checks the slice-level ReloadAll succeeds.
func testDatahistoryjobresultsReloadAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := DatahistoryjobresultSlice{o}

	if err = slice.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
|
|
|
|
// testDatahistoryjobresultsSelect inserts one randomized row inside a
// rolled-back transaction and checks All returns exactly one record.
func testDatahistoryjobresultsSelect(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := Datahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 1 {
		t.Error("want one record, got:", len(slice))
	}
}
|
|
|
|
var (
	// datahistoryjobresultDBTypes maps struct field names to their database
	// column types; randomize.Struct uses it to generate fitting test values.
	datahistoryjobresultDBTypes = map[string]string{`ID`: `TEXT`, `JobID`: `TEXT`, `Result`: `TEXT`, `Status`: `REAL`, `IntervalStartTime`: `TIMESTAMP`, `IntervalEndTime`: `TIMESTAMP`, `RunTime`: `TIMESTAMP`}
	// Force a bytes package dependency in case the generated code doesn't use it.
	_ = bytes.MinRead
)
|
|
|
|
func testDatahistoryjobresultsUpdate(t *testing.T) {
|
|
t.Parallel()
|
|
|
|
if 0 == len(datahistoryjobresultPrimaryKeyColumns) {
|
|
t.Skip("Skipping table with no primary key columns")
|
|
}
|
|
if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
|
|
t.Skip("Skipping table with only primary key columns")
|
|
}
|
|
|
|
seed := randomize.NewSeed()
|
|
var err error
|
|
o := &Datahistoryjobresult{}
|
|
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
|
|
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
|
|
}
|
|
|
|
ctx := context.Background()
|
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
|
defer func() { _ = tx.Rollback() }()
|
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
|
t.Error(err)
|
|
}
|
|
|
|
count, err := Datahistoryjobresults().Count(ctx, tx)
|
|
if err != nil {
|
|
t.Error(err)
|
|
}
|
|
|
|
if count != 1 {
|
|
t.Error("want one record, got:", count)
|
|
}
|
|
|
|
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
|
|
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
|
|
}
|
|
|
|
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
|
|
t.Error(err)
|
|
} else if rowsAff != 1 {
|
|
t.Error("should only affect one row but affected", rowsAff)
|
|
}
|
|
}
|
|
|
|
// testDatahistoryjobresultsSliceUpdateAll inserts one randomized row inside
// a rolled-back transaction, re-randomizes it, builds an update map of the
// non-primary-key columns via reflection over the `boil` struct tags, and
// verifies the slice-level UpdateAll affects exactly one row.
func testDatahistoryjobresultsSliceUpdateAll(t *testing.T) {
	t.Parallel()

	if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Re-randomize everything except the primary key so the update has changes.
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}

	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(datahistoryjobresultAllColumns, datahistoryjobresultPrimaryKeyColumns) {
		fields = datahistoryjobresultAllColumns
	} else {
		fields = strmangle.SetComplement(
			datahistoryjobresultAllColumns,
			datahistoryjobresultPrimaryKeyColumns,
		)
	}

	// Build column -> value pairs by matching each field's `boil` struct tag
	// against the chosen column names.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()

	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}

	slice := DatahistoryjobresultSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}
|