Feature: Data history manager engine subsystem (#693)

* Adds lovely initial concept for historical data doer

* Adds ability to save tasks. Adds config. Adds startStop to engine

* Has a database microservice without use of globals! Further infrastructure design. Adds readme

* Commentary to help design

* Adds migrations for database

* readme and adds database models

* Some modelling that doesn't work end of day

* Completes datahistoryjob sql.Begins datahistoryjobresult

* Adds datahistoryjob functions to retrieve job results. Adapts subsystem

* Adds process for upserting jobs and job results to the database

* Broken end of day weird sqlboiler crap

* Fixes issue with SQL generation.

* RPC generation and addition of basic upsert command

* Renames types

* Adds rpc functions

* quick commit before context switch. Exchanges aren't being populated

* Begin the tests!

* complete sql tests. stop failed jobs. CLI command creation

* Defines rpc commands

* Fleshes out RPC implementation

* Expands testing

* Expands testing, removes double remove

* Adds coverage of data history subsystem, expands errors and nil checks

* Minor logic improvement

* streamlines datahistory test setup

* End of day minor linting

* Lint, convert simplify, rpc expansion, type expansion, readme expansion

* Documentation update

* Renames for consistency

* Completes RPC server commands

* Fixes tests

* Speeds up testing by reducing unnecessary actions. Adds maxjobspercycle config

* Comments for everything

* Adds missing result string. checks interval supported. default start end cli

* Fixes ID problem. Improves binance trade fetch. job ranges are processed

* adds dbservice coverage. adds rpcserver coverage

* docs regen, uses dbcon interface, reverts binance, fixes races, toggle manager

* Speed up tests, remove bad global usage, fix uuid check

* Adds verbose. Updates docs. Fixes postgres

* Minor changes to logging and start stop

* Fixes postgres db tests, fixes postgres column typo

* Fixes old string typo,removes constraint,error parsing for nonreaders

* prevents dhm running when table doesn't exist. Adds prereq documentation

* Adds parallel, rmlines, err fix, comment fix, minor param fixes

* doc regen, common time range check and test updating

* Fixes job validation issues. Updates candle range checker.

* Ensures test cannot fail due to time.Now() shenanigans

* Fixes oopsie, adds documentation and a warn

* Fixes another time test, adjusts copy

* Drastically speeds up data history manager tests via function overrides

* Fixes summary bug and better logs

* Fixes local time test, fixes websocket tests

* removes defaults and comment,updates error messages,sets cli command args

* Fixes FTX trade processing

* Fixes issue where jobs got stuck if data wasn't returned but retrieval was successful

* Improves test speed. Simplifies trade verification SQL. Adds command help

* Fixes the oopsies

* Fixes use of query within transaction. Fixes trade err

* oopsie, not needed

* Adds missing data status. Properly ends job even when data is missing

* errors are more verbose and so have more words to describe them

* Doc regen for new status

* tiny test tinkering

* str := string("Removes .String()").String()

* Merge fixups

* Fixes a data race discovered during github actions

* Allows websocket test to pass consistently

* Fixes merge issue preventing datahistorymanager from starting via config

* Niterinos cmd defaults and explanations

* fixes default oopsie

* Fixes lack of nil protection

* Additional oopsie

* More detailed error for validating job exchange
This commit is contained in:
Scott
2021-07-01 16:21:48 +10:00
committed by GitHub
parent c109cfb6b4
commit 197ef2df21
133 changed files with 17770 additions and 1367 deletions

View File

@@ -9,8 +9,8 @@ Rots | https://github.com/Rots
vazha | https://github.com/vazha
ermalguni | https://github.com/ermalguni
MadCozBadd | https://github.com/MadCozBadd
vadimzhukck | https://github.com/vadimzhukck
dependabot[bot] | https://github.com/apps/dependabot
vadimzhukck | https://github.com/vadimzhukck
140am | https://github.com/140am
marcofranssen | https://github.com/marcofranssen
dackroyd | https://github.com/dackroyd
@@ -21,6 +21,7 @@ azhang | https://github.com/azhang
andreygrehov | https://github.com/andreygrehov
bretep | https://github.com/bretep
Christian-Achilli | https://github.com/Christian-Achilli
yangrq1018 | https://github.com/yangrq1018
gam-phon | https://github.com/gam-phon
cornelk | https://github.com/cornelk
if1live | https://github.com/if1live
@@ -29,7 +30,7 @@ soxipy | https://github.com/soxipy
mshogin | https://github.com/mshogin
herenow | https://github.com/herenow
blombard | https://github.com/blombard
CodeLingoBot | https://github.com/CodeLingoBot
tk42 | https://github.com/tk42
daniel-cohen | https://github.com/daniel-cohen
DirectX | https://github.com/DirectX
frankzougc | https://github.com/frankzougc
@@ -45,3 +46,4 @@ lookfirst | https://github.com/lookfirst
merkeld | https://github.com/merkeld
CodeLingoTeam | https://github.com/CodeLingoTeam
Daanikus | https://github.com/Daanikus
CodeLingoBot | https://github.com/CodeLingoBot

View File

@@ -143,17 +143,17 @@ Binaries will be published once the codebase reaches a stable condition.
|User|Contribution Amount|
|--|--|
| [thrasher-](https://github.com/thrasher-) | 654 |
| [thrasher-](https://github.com/thrasher-) | 655 |
| [shazbert](https://github.com/shazbert) | 207 |
| [gloriousCode](https://github.com/gloriousCode) | 179 |
| [gloriousCode](https://github.com/gloriousCode) | 180 |
| [dependabot-preview[bot]](https://github.com/apps/dependabot-preview) | 88 |
| [xtda](https://github.com/xtda) | 47 |
| [Rots](https://github.com/Rots) | 15 |
| [vazha](https://github.com/vazha) | 15 |
| [ermalguni](https://github.com/ermalguni) | 14 |
| [MadCozBadd](https://github.com/MadCozBadd) | 12 |
| [dependabot[bot]](https://github.com/apps/dependabot) | 12 |
| [vadimzhukck](https://github.com/vadimzhukck) | 10 |
| [dependabot[bot]](https://github.com/apps/dependabot) | 10 |
| [140am](https://github.com/140am) | 8 |
| [marcofranssen](https://github.com/marcofranssen) | 8 |
| [dackroyd](https://github.com/dackroyd) | 5 |
@@ -164,6 +164,7 @@ Binaries will be published once the codebase reaches a stable condition.
| [andreygrehov](https://github.com/andreygrehov) | 2 |
| [bretep](https://github.com/bretep) | 2 |
| [Christian-Achilli](https://github.com/Christian-Achilli) | 2 |
| [yangrq1018](https://github.com/yangrq1018) | 2 |
| [gam-phon](https://github.com/gam-phon) | 2 |
| [cornelk](https://github.com/cornelk) | 2 |
| [if1live](https://github.com/if1live) | 2 |
@@ -172,7 +173,7 @@ Binaries will be published once the codebase reaches a stable condition.
| [mshogin](https://github.com/mshogin) | 2 |
| [herenow](https://github.com/herenow) | 2 |
| [blombard](https://github.com/blombard) | 1 |
| [CodeLingoBot](https://github.com/CodeLingoBot) | 1 |
| [tk42](https://github.com/tk42) | 2 |
| [daniel-cohen](https://github.com/daniel-cohen) | 1 |
| [DirectX](https://github.com/DirectX) | 1 |
| [frankzougc](https://github.com/frankzougc) | 1 |
@@ -188,3 +189,4 @@ Binaries will be published once the codebase reaches a stable condition.
| [merkeld](https://github.com/merkeld) | 1 |
| [CodeLingoTeam](https://github.com/CodeLingoTeam) | 1 |
| [Daanikus](https://github.com/Daanikus) | 1 |
| [CodeLingoBot](https://github.com/CodeLingoBot) | 1 |

View File

@@ -461,19 +461,19 @@ func (bt *BackTest) loadData(cfg *config.Config, exch gctexchange.IBotExchange,
}
resp.Item.RemoveDuplicates()
resp.Item.SortCandlesByTimestamp(false)
resp.Range = gctkline.CalculateCandleDateRanges(
resp.Range, err = gctkline.CalculateCandleDateRanges(
resp.Item.Candles[0].Time,
resp.Item.Candles[len(resp.Item.Candles)-1].Time.Add(cfg.DataSettings.Interval),
gctkline.Interval(cfg.DataSettings.Interval),
0,
)
err = resp.Range.VerifyResultsHaveData(resp.Item.Candles)
if err != nil {
if strings.Contains(err.Error(), "missing candles data between") {
log.Warn(log.BackTester, err.Error())
} else {
return nil, err
}
return nil, err
}
resp.Range.SetHasDataFromCandles(resp.Item.Candles)
summary := resp.Range.DataSummary(false)
if len(summary) > 0 {
log.Warnf(log.BackTester, "%v", summary)
}
case cfg.DataSettings.DatabaseData != nil:
if cfg.DataSettings.DatabaseData.InclusiveEndDate {
@@ -509,19 +509,19 @@ func (bt *BackTest) loadData(cfg *config.Config, exch gctexchange.IBotExchange,
resp.Item.RemoveDuplicates()
resp.Item.SortCandlesByTimestamp(false)
resp.Range = gctkline.CalculateCandleDateRanges(
resp.Range, err = gctkline.CalculateCandleDateRanges(
cfg.DataSettings.DatabaseData.StartDate,
cfg.DataSettings.DatabaseData.EndDate,
gctkline.Interval(cfg.DataSettings.Interval),
0,
)
err = resp.Range.VerifyResultsHaveData(resp.Item.Candles)
if err != nil {
if strings.Contains(err.Error(), "missing candles data between") {
log.Warn(log.BackTester, err.Error())
} else {
return nil, err
}
return nil, err
}
resp.Range.SetHasDataFromCandles(resp.Item.Candles)
summary := resp.Range.DataSummary(false)
if len(summary) > 0 {
log.Warnf(log.BackTester, "%v", summary)
}
case cfg.DataSettings.APIData != nil:
if cfg.DataSettings.APIData.InclusiveEndDate {
@@ -601,11 +601,14 @@ func loadAPIData(cfg *config.Config, exch gctexchange.IBotExchange, fPair curren
if cfg.DataSettings.Interval <= 0 {
return nil, errIntervalUnset
}
dates := gctkline.CalculateCandleDateRanges(
dates, err := gctkline.CalculateCandleDateRanges(
cfg.DataSettings.APIData.StartDate,
cfg.DataSettings.APIData.EndDate,
gctkline.Interval(cfg.DataSettings.Interval),
resultLimit)
if err != nil {
return nil, err
}
candles, err := api.LoadData(
dataType,
cfg.DataSettings.APIData.StartDate,
@@ -617,14 +620,12 @@ func loadAPIData(cfg *config.Config, exch gctexchange.IBotExchange, fPair curren
if err != nil {
return nil, fmt.Errorf("%v. Please check your GoCryptoTrader configuration", err)
}
err = dates.VerifyResultsHaveData(candles.Candles)
if err != nil && errors.Is(err, gctkline.ErrMissingCandleData) {
log.Warn(log.BackTester, err.Error())
} else if err != nil {
return nil, err
dates.SetHasDataFromCandles(candles.Candles)
summary := dates.DataSummary(false)
if len(summary) > 0 {
log.Warnf(log.BackTester, "%v", summary)
}
candles.FillMissingDataWithEmptyEntries(&dates)
candles.FillMissingDataWithEmptyEntries(dates)
candles.RemoveOutsideRange(cfg.DataSettings.APIData.StartDate, cfg.DataSettings.APIData.EndDate)
return &kline.DataFromKline{
Item: *candles,
@@ -983,13 +984,16 @@ func (bt *BackTest) loadLiveData(resp *kline.DataFromKline, cfg *config.Config,
}
endDate := candles.Candles[len(candles.Candles)-1].Time.Add(cfg.DataSettings.Interval)
if resp.Range.Ranges == nil {
dataRange := gctkline.CalculateCandleDateRanges(
dataRange, err := gctkline.CalculateCandleDateRanges(
startDate,
endDate,
gctkline.Interval(cfg.DataSettings.Interval),
0,
)
resp.Range = gctkline.IntervalRangeHolder{
if err != nil {
return err
}
resp.Range = &gctkline.IntervalRangeHolder{
Start: gctkline.CreateIntervalTime(startDate),
End: gctkline.CreateIntervalTime(endDate),
Ranges: dataRange.Ranges,

View File

@@ -430,7 +430,7 @@ func TestFullCycle(t *testing.T) {
}},
},
Base: data.Base{},
Range: gctkline.IntervalRangeHolder{
Range: &gctkline.IntervalRangeHolder{
Start: gctkline.CreateIntervalTime(tt),
End: gctkline.CreateIntervalTime(tt.Add(gctkline.FifteenMin.Duration())),
Ranges: []gctkline.IntervalRange{
@@ -528,7 +528,7 @@ func TestFullCycleMulti(t *testing.T) {
}},
},
Base: data.Base{},
Range: gctkline.IntervalRangeHolder{
Range: &gctkline.IntervalRangeHolder{
Start: gctkline.CreateIntervalTime(tt),
End: gctkline.CreateIntervalTime(tt.Add(gctkline.FifteenMin.Duration())),
Ranges: []gctkline.IntervalRange{

View File

@@ -400,13 +400,13 @@ func parseDatabase(reader *bufio.Reader, cfg *config.Config) error {
return fmt.Errorf("database failed to set config: %w", err)
}
if cfg.DataSettings.DatabaseData.ConfigOverride.Driver == database.DBPostgreSQL {
_, err = dbPSQL.Connect()
_, err = dbPSQL.Connect(cfg.DataSettings.DatabaseData.ConfigOverride)
if err != nil {
return fmt.Errorf("database failed to connect: %v", err)
}
} else if cfg.DataSettings.DatabaseData.ConfigOverride.Driver == database.DBSQLite ||
cfg.DataSettings.DatabaseData.ConfigOverride.Driver == database.DBSQLite3 {
_, err = dbsqlite3.Connect()
_, err = dbsqlite3.Connect(cfg.DataSettings.DatabaseData.ConfigOverride.Database)
if err != nil {
return fmt.Errorf("database failed to connect: %v", err)
}

View File

@@ -71,7 +71,7 @@ func TestLoadCandles(t *testing.T) {
func TestLoadTrades(t *testing.T) {
t.Parallel()
interval := gctkline.FifteenMin
tt1 := time.Now().Add(-time.Minute * 15).Round(interval.Duration())
tt1 := time.Now().Add(-time.Minute * 30).Round(interval.Duration())
tt2 := time.Now().Round(interval.Duration())
a := asset.Spot
p := currency.NewPair(currency.BTC, currency.USDT)

View File

@@ -13,6 +13,9 @@ import (
// HasDataAtTime checks the underlying range data
// to determine whether there is any candle data present at the time provided
func (d *DataFromKline) HasDataAtTime(t time.Time) bool {
if d.Range == nil {
return false
}
return d.Range.HasDataAtDate(t)
}

View File

@@ -88,9 +88,12 @@ func TestHasDataAtTime(t *testing.T) {
t.Error("expected false")
}
ranger := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
ranger, err := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
if err != nil {
t.Error(err)
}
d.Range = ranger
_ = d.Range.VerifyResultsHaveData(d.Item.Candles)
d.Range.SetHasDataFromCandles(d.Item.Candles)
has = d.HasDataAtTime(dInsert)
if !has {
t.Error("expected true")

View File

@@ -15,7 +15,7 @@ var errNoCandleData = errors.New("no candle data provided")
type DataFromKline struct {
Item gctkline.Item
data.Base
Range gctkline.IntervalRangeHolder
Range *gctkline.IntervalRangeHolder
addedTimes map[time.Time]bool
}

View File

@@ -259,11 +259,15 @@ func calculateMaxDrawdown(closePrices []common.DataEventHandler) Swing {
lowestTime = currTime
}
if highestPrice < currHigh && highestPrice > 0 {
intervals := gctkline.CalculateCandleDateRanges(highestTime, lowestTime, closePrices[i].GetInterval(), 0)
if lowestTime.Equal(highestTime) {
// create distinction if the greatest drawdown occurs within the same candle
lowestTime = lowestTime.Add((time.Hour * 23) + (time.Minute * 59) + (time.Second * 59))
}
intervals, err := gctkline.CalculateCandleDateRanges(highestTime, lowestTime, closePrices[i].GetInterval(), 0)
if err != nil {
log.Error(log.BackTester, err)
continue
}
swings = append(swings, Swing{
Highest: Iteration{
Time: highestTime,
@@ -285,7 +289,14 @@ func calculateMaxDrawdown(closePrices []common.DataEventHandler) Swing {
}
if (len(swings) > 0 && swings[len(swings)-1].Lowest.Price != closePrices[len(closePrices)-1].LowPrice()) || swings == nil {
// need to close out the final drawdown
intervals := gctkline.CalculateCandleDateRanges(highestTime, lowestTime, closePrices[0].GetInterval(), 0)
if lowestTime.Equal(highestTime) {
// create distinction if the greatest drawdown occurs within the same candle
lowestTime = lowestTime.Add((time.Hour * 23) + (time.Minute * 59) + (time.Second * 59))
}
intervals, err := gctkline.CalculateCandleDateRanges(highestTime, lowestTime, closePrices[0].GetInterval(), 0)
if err != nil {
log.Error(log.BackTester, err)
}
drawdownPercent := 0.0
if highestPrice > 0 {
drawdownPercent = ((lowestPrice - highestPrice) / highestPrice) * 100

View File

@@ -219,7 +219,7 @@ func TestPrintResults(t *testing.T) {
}
func TestCalculateMaxDrawdown(t *testing.T) {
tt1 := time.Now().Round(gctkline.OneDay.Duration())
tt1 := time.Now().Add(-gctkline.OneDay.Duration() * 7).Round(gctkline.OneDay.Duration())
exch := testExchange
a := asset.Spot
p := currency.NewPair(currency.BTC, currency.USDT)

View File

@@ -555,8 +555,8 @@ func TestCalculateTheResults(t *testing.T) {
t.Error(err)
}
tt := time.Now()
tt2 := time.Now().Add(time.Hour)
tt := time.Now().Add(-gctkline.OneDay.Duration() * 7)
tt2 := time.Now().Add(-gctkline.OneDay.Duration() * 6)
exch := testExchange
a := asset.Spot
p := currency.NewPair(currency.BTC, currency.USDT)

View File

@@ -50,7 +50,7 @@ func TestGetBase(t *testing.T) {
_, err = s.GetBaseData(&datakline.DataFromKline{
Item: gctkline.Item{},
Base: d,
Range: gctkline.IntervalRangeHolder{},
Range: &gctkline.IntervalRangeHolder{},
})
if err != nil {
t.Error(err)

View File

@@ -73,7 +73,7 @@ func TestOnSignal(t *testing.T) {
da := &kline.DataFromKline{
Item: gctkline.Item{},
Base: d,
Range: gctkline.IntervalRangeHolder{},
Range: &gctkline.IntervalRangeHolder{},
}
var resp signal.Event
resp, err = s.OnSignal(da, nil)
@@ -105,9 +105,12 @@ func TestOnSignal(t *testing.T) {
t.Error(err)
}
ranger := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
ranger, err := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
if err != nil {
t.Error(err)
}
da.Range = ranger
_ = da.Range.VerifyResultsHaveData(da.Item.Candles)
da.Range.SetHasDataFromCandles(da.Item.Candles)
resp, err = s.OnSignal(da, nil)
if err != nil {
t.Error(err)
@@ -149,7 +152,7 @@ func TestOnSignals(t *testing.T) {
da := &kline.DataFromKline{
Item: gctkline.Item{},
Base: d,
Range: gctkline.IntervalRangeHolder{},
Range: &gctkline.IntervalRangeHolder{},
}
var resp []signal.Event
resp, err = s.OnSimultaneousSignals([]data.Handler{da}, nil)
@@ -184,9 +187,12 @@ func TestOnSignals(t *testing.T) {
t.Error(err)
}
ranger := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
ranger, err := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
if err != nil {
t.Error(err)
}
da.Range = ranger
_ = da.Range.VerifyResultsHaveData(da.Item.Candles)
da.Range.SetHasDataFromCandles(da.Item.Candles)
resp, err = s.OnSimultaneousSignals([]data.Handler{da}, nil)
if err != nil {
t.Error(err)

View File

@@ -110,7 +110,7 @@ func TestOnSignal(t *testing.T) {
da := &kline.DataFromKline{
Item: gctkline.Item{},
Base: d,
Range: gctkline.IntervalRangeHolder{},
Range: &gctkline.IntervalRangeHolder{},
}
var resp signal.Event
_, err = s.OnSignal(da, nil)
@@ -145,9 +145,12 @@ func TestOnSignal(t *testing.T) {
t.Error(err)
}
ranger := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
ranger, err := gctkline.CalculateCandleDateRanges(dStart, dEnd, gctkline.OneDay, 100000)
if err != nil {
t.Error(err)
}
da.Range = ranger
_ = da.Range.VerifyResultsHaveData(da.Item.Candles)
da.Range.SetHasDataFromCandles(da.Item.Candles)
resp, err = s.OnSignal(da, nil)
if err != nil {
t.Error(err)
@@ -186,7 +189,7 @@ func TestOnSignals(t *testing.T) {
da := &kline.DataFromKline{
Item: gctkline.Item{},
Base: d,
Range: gctkline.IntervalRangeHolder{},
Range: &gctkline.IntervalRangeHolder{},
}
_, err = s.OnSimultaneousSignals([]data.Handler{da}, nil)
if !errors.Is(err, base.ErrSimultaneousProcessingNotSupported) {

View File

@@ -26,15 +26,15 @@ var (
args string
)
func openDBConnection(driver string) (err error) {
if driver == database.DBPostgreSQL {
dbConn, err = dbPSQL.Connect()
func openDBConnection(cfg *database.Config) (err error) {
if cfg.Driver == database.DBPostgreSQL {
dbConn, err = dbPSQL.Connect(cfg)
if err != nil {
return fmt.Errorf("database failed to connect: %v, some features that utilise a database will be unavailable", err)
}
return nil
} else if driver == database.DBSQLite || driver == database.DBSQLite3 {
dbConn, err = dbsqlite3.Connect()
} else if cfg.Driver == database.DBSQLite || cfg.Driver == database.DBSQLite3 {
dbConn, err = dbsqlite3.Connect(cfg.Database)
if err != nil {
return fmt.Errorf("database failed to connect: %v, some features that utilise a database will be unavailable", err)
}
@@ -68,14 +68,13 @@ func main() {
os.Exit(1)
}
err = openDBConnection(conf.Database.Driver)
err = openDBConnection(&conf.Database)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
drv := repository.GetSQLDialect()
if drv == database.DBSQLite || drv == database.DBSQLite3 {
fmt.Printf("Database file: %s\n", conf.Database.Database)
} else {

View File

@@ -28,7 +28,7 @@ func load(c *cli.Context) error {
return database.ErrDatabaseSupportDisabled
}
err = openDBConnection(c, conf.Database.Driver)
err = openDBConnection(c, &conf.Database)
if err != nil {
return err
}
@@ -43,18 +43,18 @@ func load(c *cli.Context) error {
return nil
}
func openDBConnection(c *cli.Context, driver string) (err error) {
func openDBConnection(c *cli.Context, cfg *database.Config) (err error) {
if c.IsSet("verbose") {
boil.DebugMode = true
}
if driver == database.DBPostgreSQL {
dbConn, err = dbPSQL.Connect()
if cfg.Driver == database.DBPostgreSQL {
dbConn, err = dbPSQL.Connect(cfg)
if err != nil {
return fmt.Errorf("database failed to connect: %v, some features that utilise a database will be unavailable", err)
}
return nil
} else if driver == database.DBSQLite || driver == database.DBSQLite3 {
dbConn, err = dbsqlite3.Connect()
} else if cfg.Driver == database.DBSQLite || cfg.Driver == database.DBSQLite3 {
dbConn, err = dbsqlite3.Connect(cfg.Database)
if err != nil {
return fmt.Errorf("database failed to connect: %v, some features that utilise a database will be unavailable", err)
}

View File

@@ -246,6 +246,10 @@ func main() {
Login: "Daanikus",
URL: "https://github.com/Daanikus",
Contributions: 1,
}, {
Login: "CodeLingoBot",
URL: "https://github.com/CodeLingoBot",
Contributions: 1,
},
}...)
@@ -434,7 +438,7 @@ func GetPackageName(name string, capital bool) string {
i = len(newStrings) - 1
}
if capital {
return strings.Title(newStrings[i])
return strings.Replace(strings.Title(newStrings[i]), "_", " ", -1)
}
return newStrings[i]
}

View File

@@ -0,0 +1,110 @@
{{define "engine datahistory_manager" -}}
{{template "header" .}}
## Current Features for {{.CapitalName}}
+ The data history manager is an engine subsystem responsible for ensuring that the candle/trade history in the range you define is synchronised to your database
+ It is a long running synchronisation task designed to not overwhelm resources and ensure that all data requested is accounted for and saved to the database
+ The data history manager is disabled by default and requires a database connection to function
+ It can be enabled either via a runtime param, config modification or via RPC command `enablesubsystem`
+ The data history manager accepts jobs from RPC commands
+ A job is defined in the `Database tables` section below
+ Jobs will be addressed by the data history manager at an interval defined in your config; this is detailed in the `Application run time parameters` table below
+ Jobs will fetch data at sizes you request (which can cater to hardware limitations such as low RAM)
+ Jobs are completed once all data has been fetched/attempted to be fetched in the time range
## What are the prerequisites?
+ Ensure you have a database setup, you can read about that [here](/database)
+ Ensure you have run dbmigrate under `/cmd/dbmigrate` via `dbmigrate -command=up`, you can read about that [here](/database#create-and-run-migrations)
+ Ensure you have seeded exchanges to the database via the application dbseed under `/cmd/dbseed`, you can read about it [here](/cmd/dbseed)
+ Ensure you have the database setup and enabled in your config, this can also be seen [here](/database)
+ Data retrieval can only be made on exchanges that support it, see the readmes for [candles](/docs/OHLCV.md) and [trades](/exchanges/trade#exchange-support-table)
+ Read below on how to enable the data history manager and add data history jobs
## What is a data history job?
A job is a set of parameters which will allow GoCryptoTrader to periodically retrieve historical data. Its purpose is to break up the process of retrieving large sets of data for multiple currencies and exchanges into more manageable chunks in a "set and forget" style.
For a breakdown of what a job consists of and what each parameter does, please review the database tables and the cycle details below.
## What happens during a data history cycle?
+ Once the checkInterval ticker timer has finished, the data history manager will process all jobs considered `active`.
+ A job's start and end time is broken down into intervals defined by the `interval` variable of a job. A job beginning `2020-01-01` and ending `2020-01-02` with an interval of one hour will create 24 chunks to retrieve
+ The number of intervals it will then request from an API is defined by the `RequestSizeLimit`. A `RequestSizeLimit` of 2 will mean when processing a job, the data history manager will fetch 2 hours worth of data
+ When processing a job the `RunBatchLimit` defines how many `RequestSizeLimits` it will fetch. A `RunBatchLimit` of 3 means when processing a job, the history manager will fetch 3 lots of 2 hour chunks from the API in a run of a job
+ If the data is successfully retrieved, that chunk will be considered `complete` and saved to the database
+ The `MaxRetryAttempts` defines how many times the data history manager will attempt to fetch a chunk of data before flagging it as `failed`.
+ A chunk is only attempted once per processing time.
+ If it fails, the next attempt will be after the `checkInterval` has finished again.
+ The errors for retrieval failures are stored in the database, allowing you to understand why a certain chunk of time is unavailable (eg exchange downtime and missing data)
+ All results are saved to the database, the data history manager will analyse all results and ready jobs for the next round of processing
## How do I add one?
+ First ensure that the data history monitor is enabled, you can do this via the config (see table `dataHistoryManager` under Config parameters below), via run time parameter (see table Application run time parameters below) or via the RPC command `enablesubsystem --subsystemname="data_history_manager"`
+ The simplest way of adding a new data history job is via the GCTCLI under `/cmd/gctcli`.
+ Modify the following example command to your needs: `.\gctcli.exe datahistory upsertjob --nickname=binance-spot-bnb-btc-1h-candles --exchange=binance --asset=spot --pair=BNB-BTC --interval=3600 --start_date="2020-06-02 12:00:00" --end_date="2020-12-02 12:00:00" --request_size_limit=10 --data_type=0 --max_retry_attempts=3 --batch_size=3`
### Candle intervals and trade fetching
+ A candle interval is required for a job, even when fetching trade data. This is to appropriately break down requests into time interval chunks. However, it is restricted to only a small range of times. This is to prevent fetching issues as fetching trades over a period of days or weeks will take a significant amount of time. When setting a job to fetch trades, the allowable range is less than 4 hours and greater than 10 minutes. So an interval of 1 hour will then fetch an hour's worth of trade data.
### Application run time parameters
| Parameter | Description | Example |
| ------ | ----------- | ------- |
| datahistorymanager | A boolean value which determines if the data history manager is enabled. Defaults to `false` | `-datahistorymanager=true` |
### Config parameters
#### dataHistoryManager
| Config | Description | Example |
| ------ | ----------- | ------- |
| enabled | If enabled will run the data history manager on startup | `true` |
| checkInterval | A golang `time.Duration` interval of when to attempt to fetch all active jobs' data | `15000000000` |
| maxJobsPerCycle | Allows you to control how many jobs are processed after the `checkInterval` timer finishes. Useful if you have many jobs, but don't wish to constantly be retrieving data | `5` |
| verbose | Displays some extra logs to your logging output to help debug | `false` |
### RPC commands
The below table is a summary of commands. For more details, view the commands in `/cmd/gctcli` or `/gctrpc/rpc.swagger.json`
| Command | Description |
| ------ | ----------- |
| UpsertDataHistoryJob | Updates or Inserts a job to the manager and database |
| GetDataHistoryJobDetails | Returns a job's details via its nickname or ID. Can optionally return an array of all run results |
| GetActiveDataHistoryJobs | Will return all jobs that have an `active` status |
| DeleteJob | Will remove a job for processing. Data is preserved in the database for later reference |
| GetDataHistoryJobsBetween | Returns all jobs, of all status types between the dates provided |
| GetDataHistoryJobSummary | Will return an executive summary of the progress of your job by nickname |
### Database tables
#### datahistoryjob
| Field | Description | Example |
| ------ | ----------- | ------- |
| id | Unique ID of the job. Generated at creation | `deadbeef-dead-beef-dead-beef13371337` |
| nickname | A custom name for the job that is unique for lookups | `binance-xrp-doge-2017` |
| exchange_name_id | The exchange id to fetch data from. The ID should be generated via `/cmd/dbmigrate`. When creating a job, you only need to provide the exchange name | `binance` |
| asset | The asset type of the data to be fetching | `spot` |
| base | The currency pair base of the data to be fetching | `xrp` |
| quote | The currency pair quote of the data to be fetching | `doge` |
| start_time | When to begin fetching data | `01-01-2017T13:33:37Z` |
| end_time | When to finish fetching data | `01-01-2018T13:33:37Z` |
| interval | A golang `time.Duration` representation of the candle interval to use. | `30000000000` |
| data_type | The data type to fetch. `0` is candles and `1` is trades | `0` |
| request_size | The number of candles to fetch. eg if `500`, the data history manager will break up the request into the appropriate timeframe to ensure the data history run interval will fetch 500 candles to save to the database | `500` |
| max_retries | For an interval period, the amount of attempts the data history manager is allowed to attempt to fetch data before moving onto the next period. This can be useful for determining whether the exchange is missing the data in that time period or, if just one failure of three, just means that the data history manager couldn't finish one request | `3` |
| batch_count | The number of requests to make when processing a job | `3` |
| status | A numerical representation for the status. `0` is active, `1` is failed `2` is complete, `3` is removed and `4` is missing data | `0` |
| created | The date the job was created. | `2020-01-01T13:33:37Z` |
#### datahistoryjobresult
| Field | Description | Example |
| ------ | ----------- | ------- |
| id | Unique ID of the job status | `deadbeef-dead-beef-dead-beef13371337` |
| job_id | The job ID being referenced | `deadbeef-dead-beef-dead-beef13371337` |
| result | If there is an error, it will be detailed here | `exchange missing candle data for 2020-01-01 13:37Z` |
| status | A numerical representation of the job result status. `1` is failed, `2` is complete and `4` is missing data | `2` |
| interval_start_time | The start date of the period fetched | `2020-01-01T13:33:37Z` |
| interval_end_time | The end date of the period fetched | `2020-01-02T13:33:37Z` |
| run_time | The time the job was run | `2020-01-03T13:33:37Z` |
### Please click GoDocs chevron above to view current GoDoc information for this package
{{template "contributions"}}
{{template "donations" .}}
{{end}}

View File

@@ -15,7 +15,7 @@ import (
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/gctrpc"
cli "github.com/urfave/cli/v2"
"github.com/urfave/cli/v2"
"google.golang.org/grpc"
)
@@ -4058,9 +4058,9 @@ func gctScriptUpload(c *cli.Context) error {
return nil
}
const klineMessage = "%v in seconds supported values are: 15, 60(1min), 180(3min), 300(5min), 600(10min), 900(15min), " +
"1800(30min), 3600(1h), 7200(2h), 14400(4h), 21600(6h), 28800(8h), 43200(12h), 86400(1d), 259200(3d) " +
"60480(1w), 1209600(2w), 1296000(15d), 2592000(1M), 31536000(1Y)"
const klineMessage = `interval in seconds. supported values are: 15, 60(1min), 180(3min), 300(5min), 600(10min),
900(15min) 1800(30min), 3600(1h), 7200(2h), 14400(4h), 21600(6h), 28800(8h), 43200(12h),
86400(1d), 259200(3d) 604800(1w), 1209600(2w), 1296000(15d), 2592000(1M), 31536000(1Y)`
var candleRangeSize, candleGranularity int64
var getHistoricCandlesCommand = &cli.Command{
@@ -4092,7 +4092,7 @@ var getHistoricCandlesCommand = &cli.Command{
&cli.Int64Flag{
Name: "granularity",
Aliases: []string{"g"},
Usage: fmt.Sprintf(klineMessage, "granularity"),
Usage: klineMessage,
Value: 86400,
Destination: &candleGranularity,
},
@@ -4226,7 +4226,7 @@ var getHistoricCandlesExtendedCommand = &cli.Command{
&cli.Int64Flag{
Name: "interval",
Aliases: []string{"i"},
Usage: fmt.Sprintf(klineMessage, "interval"),
Usage: klineMessage,
Value: 86400,
Destination: &candleGranularity,
},
@@ -4422,7 +4422,7 @@ var findMissingSavedCandleIntervalsCommand = &cli.Command{
&cli.Int64Flag{
Name: "interval",
Aliases: []string{"i"},
Usage: fmt.Sprintf(klineMessage, "interval"),
Usage: klineMessage,
Value: 86400,
Destination: &candleGranularity,
},

499
cmd/gctcli/data_history.go Normal file
View File

@@ -0,0 +1,499 @@
package main
import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/common/convert"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/gctrpc"
"github.com/urfave/cli/v2"
)
var (
	// maxRetryAttempts, requestSizeLimit and batchSize are destinations for
	// the numeric flags declared in fullJobSubCommands below.
	maxRetryAttempts, requestSizeLimit, batchSize uint64
	// guidExample is a placeholder UUID rendered in CLI help output.
	guidExample = "deadbeef-dead-beef-dead-beef13371337"
	// specificJobSubCommands holds the flags for commands which target an
	// existing job by either its ID or its nickname.
	specificJobSubCommands = []cli.Flag{
		&cli.StringFlag{
			Name:  "id",
			Usage: guidExample,
		},
		&cli.StringFlag{
			Name:  "nickname",
			Usage: "binance-spot-btc-usdt-2019-trades",
		},
	}
	// fullJobSubCommands holds every flag required to define a complete
	// data history job, used by the add/upsert commands.
	fullJobSubCommands = []cli.Flag{
		&cli.StringFlag{
			Name:     "nickname",
			Usage:    "binance-spot-btc-usdt-2019-trades",
			Required: true,
		},
		&cli.StringFlag{
			Name:  "exchange",
			Usage: "binance",
		},
		&cli.StringFlag{
			Name:  "asset",
			Usage: "spot",
		},
		&cli.StringFlag{
			Name:  "pair",
			Usage: "btc-usdt",
		},
		&cli.StringFlag{
			// defaults to one year ago; startTime is a package-level
			// destination shared with other gctcli commands
			Name:        "start_date",
			Usage:       "formatted as: 2006-01-02 15:04:05",
			Value:       time.Now().AddDate(-1, 0, 0).Format(common.SimpleTimeFormat),
			Destination: &startTime,
		},
		&cli.StringFlag{
			// defaults to one month ago
			Name:        "end_date",
			Usage:       "formatted as: 2006-01-02 15:04:05",
			Value:       time.Now().AddDate(0, -1, 0).Format(common.SimpleTimeFormat),
			Destination: &endTime,
		},
		&cli.Uint64Flag{
			Name:  "interval",
			Usage: klineMessage,
		},
		&cli.Uint64Flag{
			Name:        "request_size_limit",
			Usage:       "the number of candles to retrieve per API request",
			Destination: &requestSizeLimit,
			Value:       500,
		},
		&cli.Uint64Flag{
			Name:  "data_type",
			Usage: "0 for candles, 1 for trades",
		},
		&cli.Uint64Flag{
			Name:        "max_retry_attempts",
			Usage:       "the maximum retry attempts for an interval period before giving up",
			Value:       3,
			Destination: &maxRetryAttempts,
		},
		&cli.Uint64Flag{
			Name:        "batch_size",
			Usage:       "the amount of API calls to make per run",
			Destination: &batchSize,
			Value:       3,
		},
	}
)
// dataHistoryCommands is the gctcli command tree for managing data history
// jobs over RPC. Fixes two user-facing help strings: "by either its nickname"
// (only one lookup option exists for that command) and the grammar of the
// deletejob usage text.
var dataHistoryCommands = &cli.Command{
	Name:      "datahistory",
	Usage:     "manage data history jobs to retrieve historic trade or candle data over time",
	ArgsUsage: "<command> <args>",
	Subcommands: []*cli.Command{
		{
			Name:   "getactivejobs",
			Usage:  "returns all jobs that are currently active",
			Flags:  []cli.Flag{},
			Action: getActiveDataHistoryJobs,
		},
		{
			Name:  "getjobsbetweendates",
			Usage: "returns all jobs with creation dates between the two provided dates",
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:  "start_date",
					Usage: "formatted as: 2006-01-02 15:04:05",
				},
				&cli.StringFlag{
					Name:  "end_date",
					Usage: "formatted as: 2006-01-02 15:04:05",
				},
			},
			Action: getDataHistoryJobsBetween,
		},
		{
			Name:        "getajob",
			Usage:       "returns a job by either its id or nickname",
			Description: "na-na, why don't you get a job?",
			ArgsUsage:   "<id> or <nickname>",
			Action:      getDataHistoryJob,
			Flags:       specificJobSubCommands,
		},
		{
			Name:        "getjobwithdetailedresults",
			Usage:       "returns a job by its nickname along with all its data retrieval results",
			Description: "results may be large",
			ArgsUsage:   "<nickname>",
			Action:      getDataHistoryJob,
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:  "nickname",
					Usage: "binance-spot-btc-usdt-2019-trades",
				},
			},
		},
		{
			Name:      "getjobstatussummary",
			Usage:     "returns a job with human readable summary of its status",
			ArgsUsage: "<nickname>",
			Action:    getDataHistoryJobSummary,
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name:  "nickname",
					Usage: "binance-spot-btc-usdt-2019-trades",
				},
			},
		},
		{
			Name:   "addnewjob",
			Usage:  "creates a new data history job",
			Flags:  fullJobSubCommands,
			Action: upsertDataHistoryJob,
		},
		{
			Name:   "upsertjob",
			Usage:  "adds a new job, or updates an existing one if it matches jobid OR nickname",
			Flags:  fullJobSubCommands,
			Action: upsertDataHistoryJob,
		},
		{
			Name:      "deletejob",
			Usage:     "sets a job's status to deleted so it is no longer processed",
			ArgsUsage: "<id> or <nickname>",
			Flags:     specificJobSubCommands,
			Action:    deleteDataHistoryJob,
		},
	},
}
// getDataHistoryJob retrieves a data history job by id or nickname over RPC.
// When invoked as the "getjobwithdetailedresults" command it also requests
// the job's full result set.
func getDataHistoryJob(c *cli.Context) error {
	if c.NArg() == 0 && c.NumFlags() == 0 {
		return cli.ShowCommandHelp(c, c.Command.Name)
	}
	var id string
	if c.IsSet("id") {
		id = c.String("id")
	} else {
		// fall back to the first positional argument as the id
		id = c.Args().First()
	}
	var nickname string
	if c.IsSet("nickname") {
		nickname = c.String("nickname")
	}
	// id and nickname are mutually exclusive lookup keys
	if nickname != "" && id != "" {
		return errors.New("can only set 'id' OR 'nickname'")
	}
	conn, err := setupClient()
	if err != nil {
		return err
	}
	defer func() {
		// best-effort close; the error is printed rather than returned
		err = conn.Close()
		if err != nil {
			fmt.Print(err)
		}
	}()
	client := gctrpc.NewGoCryptoTraderClient(conn)
	request := &gctrpc.GetDataHistoryJobDetailsRequest{
		Id:       id,
		Nickname: nickname,
	}
	// this handler backs two commands; only the detailed variant asks the
	// server for every result
	if strings.EqualFold(c.Command.Name, "getjobwithdetailedresults") {
		request.FullDetails = true
	}
	result, err := client.GetDataHistoryJobDetails(context.Background(), request)
	if err != nil {
		return err
	}
	jsonOutput(result)
	return nil
}
// getActiveDataHistoryJobs requests every currently active data history job
// from the RPC server and prints the response as JSON.
func getActiveDataHistoryJobs(_ *cli.Context) error {
	connection, err := setupClient()
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := connection.Close(); closeErr != nil {
			fmt.Print(closeErr)
		}
	}()
	response, err := gctrpc.NewGoCryptoTraderClient(connection).GetActiveDataHistoryJobs(
		context.Background(), &gctrpc.GetInfoRequest{})
	if err != nil {
		return err
	}
	jsonOutput(response)
	return nil
}
// upsertDataHistoryJob builds an UpsertDataHistoryJobRequest from flags and/or
// positional arguments and submits it over RPC. When invoked as "addnewjob"
// the request is marked insert-only so an existing job will not be updated.
// Positional order: nickname, exchange, asset, pair, (start/end are flag-only),
// then interval at index 6.
func upsertDataHistoryJob(c *cli.Context) error {
	if c.NArg() == 0 && c.NumFlags() == 0 {
		return cli.ShowCommandHelp(c, c.Command.Name)
	}
	var (
		err                                 error
		nickname, exchange, assetType, pair string
		interval, dataType                  int64
	)
	if c.IsSet("nickname") {
		nickname = c.String("nickname")
	} else {
		nickname = c.Args().First()
	}
	if c.IsSet("exchange") {
		exchange = c.String("exchange")
	} else {
		exchange = c.Args().Get(1)
	}
	if !validExchange(exchange) {
		return errInvalidExchange
	}
	if c.IsSet("asset") {
		assetType = c.String("asset")
	} else {
		assetType = c.Args().Get(2)
	}
	if !validAsset(assetType) {
		return errInvalidAsset
	}
	if c.IsSet("pair") {
		pair = c.String("pair")
	} else {
		pair = c.Args().Get(3)
	}
	if !validPair(pair) {
		return errInvalidPair
	}
	p, err := currency.NewPairDelimiter(pair, pairDelimiter)
	if err != nil {
		return fmt.Errorf("cannot process pair: %w", err)
	}
	// start_date/end_date have flag destinations with defaults, so only an
	// explicit flag overrides startTime/endTime; there is no positional
	// fallback for the dates
	if c.IsSet("start_date") {
		startTime = c.String("start_date")
	}
	if c.IsSet("end_date") {
		endTime = c.String("end_date")
	}
	var s, e time.Time
	s, err = time.Parse(common.SimpleTimeFormat, startTime)
	if err != nil {
		return fmt.Errorf("invalid time format for start: %v", err)
	}
	e, err = time.Parse(common.SimpleTimeFormat, endTime)
	if err != nil {
		return fmt.Errorf("invalid time format for end: %v", err)
	}
	if c.IsSet("interval") {
		interval = c.Int64("interval")
	} else {
		// NOTE(review): index 6 assumes positional dates at 4 and 5 even
		// though the dates are flag-only above — confirm intended
		interval, err = convert.Int64FromString(c.Args().Get(6))
		if err != nil {
			return fmt.Errorf("cannot process interval: %w", err)
		}
	}
	// interval is supplied in seconds; convert to a duration for the request
	candleInterval := time.Duration(interval) * time.Second
	if c.IsSet("request_size_limit") {
		requestSizeLimit = c.Uint64("request_size_limit")
	}
	// data_type has no positional fallback; it defaults to 0 (candles)
	if c.IsSet("data_type") {
		dataType = c.Int64("data_type")
	}
	if c.IsSet("max_retry_attempts") {
		maxRetryAttempts = c.Uint64("max_retry_attempts")
	}
	if c.IsSet("batch_size") {
		batchSize = c.Uint64("batch_size")
	}
	conn, err := setupClient()
	if err != nil {
		return err
	}
	defer func() {
		// best-effort close; the error is printed rather than returned
		err = conn.Close()
		if err != nil {
			fmt.Print(err)
		}
	}()
	client := gctrpc.NewGoCryptoTraderClient(conn)
	request := &gctrpc.UpsertDataHistoryJobRequest{
		Nickname: nickname,
		Exchange: exchange,
		Asset:    assetType,
		Pair: &gctrpc.CurrencyPair{
			Delimiter: p.Delimiter,
			Base:      p.Base.String(),
			Quote:     p.Quote.String(),
		},
		StartDate:        negateLocalOffset(s),
		EndDate:          negateLocalOffset(e),
		Interval:         int64(candleInterval),
		RequestSizeLimit: int64(requestSizeLimit),
		DataType:         dataType,
		MaxRetryAttempts: int64(maxRetryAttempts),
		BatchSize:        int64(batchSize),
	}
	// addnewjob shares this handler with upsertjob; insert-only prevents
	// overwriting an existing job
	if strings.EqualFold(c.Command.Name, "addnewjob") {
		request.InsertOnly = true
	}
	result, err := client.UpsertDataHistoryJob(context.Background(), request)
	if err != nil {
		return err
	}
	jsonOutput(result)
	return nil
}
// getDataHistoryJobsBetween retrieves all jobs whose creation dates fall
// between the two supplied dates and prints the response as JSON.
func getDataHistoryJobsBetween(c *cli.Context) error {
	if c.NArg() == 0 && c.NumFlags() == 0 {
		return cli.ShowCommandHelp(c, c.Command.Name)
	}
	// prefer flags, otherwise read the two dates positionally
	startTime = c.Args().First()
	if c.IsSet("start_date") {
		startTime = c.String("start_date")
	}
	endTime = c.Args().Get(1)
	if c.IsSet("end_date") {
		endTime = c.String("end_date")
	}
	start, err := time.Parse(common.SimpleTimeFormat, startTime)
	if err != nil {
		return fmt.Errorf("invalid time format for start: %v", err)
	}
	end, err := time.Parse(common.SimpleTimeFormat, endTime)
	if err != nil {
		return fmt.Errorf("invalid time format for end: %v", err)
	}
	if end.Before(start) {
		return errors.New("start cannot be after end")
	}
	connection, err := setupClient()
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := connection.Close(); closeErr != nil {
			fmt.Print(closeErr)
		}
	}()
	response, err := gctrpc.NewGoCryptoTraderClient(connection).GetDataHistoryJobsBetween(
		context.Background(),
		&gctrpc.GetDataHistoryJobsBetweenRequest{
			StartDate: negateLocalOffset(start),
			EndDate:   negateLocalOffset(end),
		})
	if err != nil {
		return err
	}
	jsonOutput(response)
	return nil
}
// deleteDataHistoryJob flags a job as deleted over RPC so it is no longer
// processed; the job is looked up by id or nickname, never both.
func deleteDataHistoryJob(c *cli.Context) error {
	if c.NArg() == 0 && c.NumFlags() == 0 {
		return cli.ShowCommandHelp(c, c.Command.Name)
	}
	// default to the first positional argument; an explicit flag wins
	id := c.Args().First()
	if c.IsSet("id") {
		id = c.String("id")
	}
	var nickname string
	if c.IsSet("nickname") {
		nickname = c.String("nickname")
	}
	if id != "" && nickname != "" {
		return errors.New("can only set 'id' OR 'nickname'")
	}
	connection, err := setupClient()
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := connection.Close(); closeErr != nil {
			fmt.Print(closeErr)
		}
	}()
	response, err := gctrpc.NewGoCryptoTraderClient(connection).DeleteDataHistoryJob(
		context.Background(),
		&gctrpc.GetDataHistoryJobDetailsRequest{
			Id:       id,
			Nickname: nickname,
		})
	if err != nil {
		return err
	}
	jsonOutput(response)
	return nil
}
// getDataHistoryJobSummary fetches a human-readable status summary for a job
// identified by nickname and prints the response as JSON.
func getDataHistoryJobSummary(c *cli.Context) error {
	if c.NArg() == 0 && c.NumFlags() == 0 {
		return cli.ShowCommandHelp(c, c.Command.Name)
	}
	// the nickname may come from a flag or the first positional argument
	nickname := c.Args().First()
	if c.IsSet("nickname") {
		nickname = c.String("nickname")
	}
	connection, err := setupClient()
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := connection.Close(); closeErr != nil {
			fmt.Print(closeErr)
		}
	}()
	response, err := gctrpc.NewGoCryptoTraderClient(connection).GetDataHistoryJobSummary(
		context.Background(),
		&gctrpc.GetDataHistoryJobDetailsRequest{
			Nickname: nickname,
		})
	if err != nil {
		return err
	}
	jsonOutput(response)
	return nil
}

View File

@@ -11,7 +11,7 @@ import (
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/core"
"github.com/thrasher-corp/gocryptotrader/gctrpc/auth"
cli "github.com/urfave/cli/v2"
"github.com/urfave/cli/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
@@ -147,6 +147,7 @@ func main() {
gctScriptCommand,
websocketManagerCommand,
tradeCommand,
dataHistoryCommands,
}
err := app.Run(os.Args)

View File

@@ -6,7 +6,7 @@ import (
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/gctrpc"
cli "github.com/urfave/cli/v2"
"github.com/urfave/cli/v2"
)
var exchangePairManagerCommand = &cli.Command{

View File

@@ -10,7 +10,7 @@ import (
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/gctrpc"
cli "github.com/urfave/cli/v2"
"github.com/urfave/cli/v2"
)
var tradeCommand = &cli.Command{
@@ -187,7 +187,7 @@ var tradeCommand = &cli.Command{
&cli.Int64Flag{
Name: "interval",
Aliases: []string{"i"},
Usage: fmt.Sprintf(klineMessage, "interval"),
Usage: klineMessage,
Value: 86400,
Destination: &candleGranularity,
},

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"github.com/thrasher-corp/gocryptotrader/gctrpc"
cli "github.com/urfave/cli/v2"
"github.com/urfave/cli/v2"
)
var websocketManagerCommand = &cli.Command{

View File

@@ -42,6 +42,14 @@ var (
// wrapper function by an API
ErrFunctionNotSupported = errors.New("unsupported wrapper function")
errInvalidCryptoCurrency = errors.New("invalid crypto currency")
// ErrDateUnset is an error for start end check calculations
ErrDateUnset = errors.New("date unset")
// ErrStartAfterEnd is an error for start end check calculations
ErrStartAfterEnd = errors.New("start date after end date")
// ErrStartEqualsEnd is an error for start end check calculations
ErrStartEqualsEnd = errors.New("start date equals end date")
// ErrStartAfterTimeNow is an error for start end check calculations
ErrStartAfterTimeNow = errors.New("start date is after current time")
)
func initialiseHTTPClient() {
@@ -53,6 +61,14 @@ func initialiseHTTPClient() {
m.Unlock()
}
// SetHTTPClientWithTimeout protects the setting of the
// global HTTPClient
func SetHTTPClientWithTimeout(t time.Duration) {
m.Lock()
defer m.Unlock()
HTTPClient = NewHTTPClientWithTimeout(t)
}
// NewHTTPClientWithTimeout initialises a new HTTP client and its underlying
// transport IdleConnTimeout with the specified timeout duration
func NewHTTPClientWithTimeout(t time.Duration) *http.Client {
@@ -191,10 +207,13 @@ func SendHTTPRequest(method, urlPath string, headers map[string]string, body io.
req.Header.Add("User-Agent", HTTPUserAgent)
}
m.Lock()
resp, err := HTTPClient.Do(req)
if err != nil {
m.Unlock()
return "", err
}
m.Unlock()
contents, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
@@ -216,10 +235,13 @@ func SendHTTPGetRequest(urlPath string, jsonDecode, isVerbose bool, result inter
initialiseHTTPClient()
m.Lock()
res, err := HTTPClient.Get(urlPath)
if err != nil {
m.Unlock()
return err
}
m.Unlock()
if res.StatusCode != 200 {
return fmt.Errorf("common.SendHTTPGetRequest() error: HTTP status code %d", res.StatusCode)
@@ -390,3 +412,25 @@ func (e Errors) Error() string {
}
return r[:len(r)-2]
}
// StartEndTimeCheck provides some basic checks which occur
// frequently in the codebase. It validates that both dates are set, that the
// start is not in the future, and that start strictly precedes end.
func StartEndTimeCheck(start, end time.Time) error {
	switch {
	case start.IsZero():
		return fmt.Errorf("start %w", ErrDateUnset)
	case end.IsZero():
		return fmt.Errorf("end %w", ErrDateUnset)
	case start.After(time.Now()):
		return ErrStartAfterTimeNow
	case start.After(end):
		return ErrStartAfterEnd
	case start.Equal(end):
		return ErrStartEqualsEnd
	default:
		return nil
	}
}

View File

@@ -11,6 +11,7 @@ import (
"strconv"
"strings"
"testing"
"time"
)
func TestIsEnabled(t *testing.T) {
@@ -626,3 +627,40 @@ func TestErrors(t *testing.T) {
t.Fatal("does not match error")
}
}
// TestParseStartEndDate exercises StartEndTimeCheck across every error branch:
// unset start, unset end, equal dates, future start, start after end, and the
// valid case.
func TestParseStartEndDate(t *testing.T) {
	// pt: past, ft: far future, et: fixed date in 2020, nt: zero value
	pt := time.Date(1999, 1, 1, 0, 0, 0, 0, time.Local)
	ft := time.Date(2222, 1, 1, 0, 0, 0, 0, time.Local)
	et := time.Date(2020, 1, 1, 1, 0, 0, 0, time.Local)
	nt := time.Time{}
	err := StartEndTimeCheck(nt, nt)
	if !errors.Is(err, ErrDateUnset) {
		t.Errorf("received %v, expected %v", err, ErrDateUnset)
	}
	err = StartEndTimeCheck(et, nt)
	if !errors.Is(err, ErrDateUnset) {
		t.Errorf("received %v, expected %v", err, ErrDateUnset)
	}
	err = StartEndTimeCheck(et, et)
	if !errors.Is(err, ErrStartEqualsEnd) {
		t.Errorf("received %v, expected %v", err, ErrStartEqualsEnd)
	}
	err = StartEndTimeCheck(ft, et)
	if !errors.Is(err, ErrStartAfterTimeNow) {
		t.Errorf("received %v, expected %v", err, ErrStartAfterTimeNow)
	}
	err = StartEndTimeCheck(et, pt)
	if !errors.Is(err, ErrStartAfterEnd) {
		t.Errorf("received %v, expected %v", err, ErrStartAfterEnd)
	}
	err = StartEndTimeCheck(pt, et)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
}

View File

@@ -1409,6 +1409,19 @@ func (c *Config) SetNTPCheck(input io.Reader) (string, error) {
return resp, nil
}
// CheckDataHistoryMonitorConfig ensures the data history config is
// valid, or sets default values
func (c *Config) CheckDataHistoryMonitorConfig() {
	m.Lock()
	defer m.Unlock()
	if c.DataHistoryManager.CheckInterval <= 0 {
		c.DataHistoryManager.CheckInterval = defaultDataHistoryMonitorCheckTimer
	}
	// <= 0 (rather than == 0) treats negative values as invalid too,
	// consistent with the CheckInterval handling above
	if c.DataHistoryManager.MaxJobsPerCycle <= 0 {
		c.DataHistoryManager.MaxJobsPerCycle = defaultMaxJobsPerCycle
	}
}
// CheckConnectionMonitorConfig checks and if zero value assigns default values
func (c *Config) CheckConnectionMonitorConfig() {
m.Lock()
@@ -1755,6 +1768,7 @@ func (c *Config) CheckConfig() error {
}
c.CheckConnectionMonitorConfig()
c.CheckDataHistoryMonitorConfig()
c.CheckCommunicationsConfig()
c.CheckClientBankAccounts()
c.CheckBankAccountConfig()

View File

@@ -36,6 +36,8 @@ const (
DefaultAPIKey = "Key"
DefaultAPISecret = "Secret"
DefaultAPIClientID = "ClientID"
defaultDataHistoryMonitorCheckTimer = time.Minute
defaultMaxJobsPerCycle = 5
)
// Constants here hold some messages
@@ -70,22 +72,23 @@ var (
// prestart management of Portfolio, Communications, Webserver and Enabled
// Exchanges
type Config struct {
Name string `json:"name"`
DataDirectory string `json:"dataDirectory"`
EncryptConfig int `json:"encryptConfig"`
GlobalHTTPTimeout time.Duration `json:"globalHTTPTimeout"`
Database database.Config `json:"database"`
Logging log.Config `json:"logging"`
ConnectionMonitor ConnectionMonitorConfig `json:"connectionMonitor"`
Profiler Profiler `json:"profiler"`
NTPClient NTPClientConfig `json:"ntpclient"`
GCTScript gctscript.Config `json:"gctscript"`
Currency CurrencyConfig `json:"currencyConfig"`
Communications base.CommunicationsConfig `json:"communications"`
RemoteControl RemoteControlConfig `json:"remoteControl"`
Portfolio portfolio.Base `json:"portfolioAddresses"`
Exchanges []ExchangeConfig `json:"exchanges"`
BankAccounts []banking.Account `json:"bankAccounts"`
Name string `json:"name"`
DataDirectory string `json:"dataDirectory"`
EncryptConfig int `json:"encryptConfig"`
GlobalHTTPTimeout time.Duration `json:"globalHTTPTimeout"`
Database database.Config `json:"database"`
Logging log.Config `json:"logging"`
ConnectionMonitor ConnectionMonitorConfig `json:"connectionMonitor"`
DataHistoryManager DataHistoryManager `json:"dataHistoryManager"`
Profiler Profiler `json:"profiler"`
NTPClient NTPClientConfig `json:"ntpclient"`
GCTScript gctscript.Config `json:"gctscript"`
Currency CurrencyConfig `json:"currencyConfig"`
Communications base.CommunicationsConfig `json:"communications"`
RemoteControl RemoteControlConfig `json:"remoteControl"`
Portfolio portfolio.Base `json:"portfolioAddresses"`
Exchanges []ExchangeConfig `json:"exchanges"`
BankAccounts []banking.Account `json:"bankAccounts"`
// Deprecated config settings, will be removed at a future date
Webserver *WebserverConfig `json:"webserver,omitempty"`
@@ -98,6 +101,14 @@ type Config struct {
sessionDK []byte
}
// DataHistoryManager holds all information required for the data history manager
type DataHistoryManager struct {
	// Enabled toggles the data history manager subsystem
	Enabled bool `json:"enabled"`
	// CheckInterval is the period between processing runs; values <= 0 are
	// replaced with defaultDataHistoryMonitorCheckTimer by
	// CheckDataHistoryMonitorConfig
	CheckInterval time.Duration `json:"checkInterval"`
	// MaxJobsPerCycle limits the jobs processed per cycle; a zero value is
	// replaced with defaultMaxJobsPerCycle by CheckDataHistoryMonitorConfig
	MaxJobsPerCycle int64 `json:"maxJobsPerCycle"`
	// Verbose enables additional logging for the subsystem
	Verbose bool `json:"verbose"`
}
// ConnectionMonitorConfig defines the connection monitor variables to ensure
// that there is internet connectivity
type ConnectionMonitorConfig struct {

View File

@@ -2,46 +2,52 @@ package database
import (
"database/sql"
"fmt"
"time"
"github.com/thrasher-corp/sqlboiler/boil"
)
// SetConfig safely sets the global database instance's config with some
// basic locks and checks
func (i *Instance) SetConfig(cfg *Config) error {
if i == nil {
return errNilInstance
return ErrNilInstance
}
if cfg == nil {
return errNilConfig
}
i.m.Lock()
i.config = cfg
if i.config.Verbose {
boil.DebugMode = true
boil.DebugWriter = Logger{}
} else {
boil.DebugMode = false
}
i.m.Unlock()
return nil
}
// SetSQLiteConnection safely sets the global database instance's connection
// to use SQLite
func (i *Instance) SetSQLiteConnection(con *sql.DB) {
func (i *Instance) SetSQLiteConnection(con *sql.DB) error {
if i == nil {
return ErrNilInstance
}
if con == nil {
return errNilSQL
}
i.m.Lock()
defer i.m.Unlock()
i.SQL = con
i.SQL.SetMaxOpenConns(1)
return nil
}
// SetPostgresConnection safely sets the global database instance's connection
// to use Postgres
func (i *Instance) SetPostgresConnection(con *sql.DB) error {
if i == nil {
return ErrNilInstance
}
if con == nil {
return errNilSQL
}
if err := con.Ping(); err != nil {
return err
return fmt.Errorf("%w %s", errFailedPing, err)
}
i.m.Lock()
defer i.m.Unlock()
@@ -55,6 +61,9 @@ func (i *Instance) SetPostgresConnection(con *sql.DB) error {
// SetConnected safely sets the global database instance's connected
// status
func (i *Instance) SetConnected(v bool) {
if i == nil {
return
}
i.m.Lock()
i.connected = v
i.m.Unlock()
@@ -62,13 +71,23 @@ func (i *Instance) SetConnected(v bool) {
// CloseConnection safely disconnects the global database instance
func (i *Instance) CloseConnection() error {
if i == nil {
return ErrNilInstance
}
if i.SQL == nil {
return errNilSQL
}
i.m.Lock()
defer i.m.Unlock()
return i.SQL.Close()
}
// IsConnected safely checks the SQL connection status
func (i *Instance) IsConnected() bool {
if i == nil {
return false
}
i.m.RLock()
defer i.m.RUnlock()
return i.connected
@@ -76,6 +95,9 @@ func (i *Instance) IsConnected() bool {
// GetConfig safely returns a copy of the config
func (i *Instance) GetConfig() *Config {
if i == nil {
return nil
}
i.m.RLock()
defer i.m.RUnlock()
cpy := i.config
@@ -85,7 +107,10 @@ func (i *Instance) GetConfig() *Config {
// Ping pings the database
func (i *Instance) Ping() error {
if i == nil {
return errNilInstance
return ErrNilInstance
}
if !i.IsConnected() {
return ErrDatabaseNotConnected
}
i.m.RLock()
defer i.m.RUnlock()
@@ -94,3 +119,17 @@ func (i *Instance) Ping() error {
}
return i.SQL.Ping()
}
// GetSQL returns the sql connection
func (i *Instance) GetSQL() (*sql.DB, error) {
	if i == nil {
		return nil, ErrNilInstance
	}
	// a read lock is sufficient for returning the connection, matching
	// GetConfig and IsConnected; the nil check is performed under the lock
	// so i.SQL is not read unguarded
	i.m.RLock()
	defer i.m.RUnlock()
	if i.SQL == nil {
		return nil, errNilSQL
	}
	return i.SQL, nil
}

213
database/database_test.go Normal file
View File

@@ -0,0 +1,213 @@
package database
import (
"database/sql"
"errors"
"os"
"path/filepath"
"testing"
// import sqlite3 driver
_ "github.com/mattn/go-sqlite3"
)
// TestSetConfig exercises Instance.SetConfig with a valid config, a nil
// config, and a nil receiver.
func TestSetConfig(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	err := inst.SetConfig(&Config{Verbose: true})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = inst.SetConfig(nil)
	if !errors.Is(err, errNilConfig) {
		t.Errorf("received %v, expected %v", err, errNilConfig)
	}
	// a nil *Instance must be rejected rather than panic
	inst = nil
	err = inst.SetConfig(&Config{})
	if !errors.Is(err, ErrNilInstance) {
		t.Errorf("received %v, expected %v", err, ErrNilInstance)
	}
}
// TestSetSQLiteConnection exercises Instance.SetSQLiteConnection with a nil
// connection, a zero-value connection, and a nil receiver.
func TestSetSQLiteConnection(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	err := inst.SetSQLiteConnection(nil)
	if !errors.Is(err, errNilSQL) {
		t.Errorf("received %v, expected %v", err, errNilSQL)
	}
	// a zero-value *sql.DB is sufficient here; no database is opened
	err = inst.SetSQLiteConnection(&sql.DB{})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	inst = nil
	err = inst.SetSQLiteConnection(nil)
	if !errors.Is(err, ErrNilInstance) {
		t.Errorf("received %v, expected %v", err, ErrNilInstance)
	}
}
// TestSetPostgresConnection exercises Instance.SetPostgresConnection's checks
// and value setting, backed by a throwaway sqlite database file.
func TestSetPostgresConnection(t *testing.T) {
	// there is nothing actually requiring a postgres connection specifically
	// so this is testing the checks and the ability to set values
	// however, such settings would be bad for a sqlite connection irl
	t.Parallel()
	inst := &Instance{}
	databaseFullLocation := filepath.Join(DB.DataPath, "TestSetPostgresConnection")
	con, err := sql.Open("sqlite3", databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = inst.SetPostgresConnection(con)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	// clean up the database file created for the test
	err = con.Close()
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = os.Remove(databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
}
// TestSetConnected verifies SetConnected toggles the internal connected flag.
func TestSetConnected(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	inst.SetConnected(true)
	if !inst.connected {
		t.Errorf("received %v, expected %v", false, true)
	}
	inst.SetConnected(false)
	if inst.connected {
		t.Errorf("received %v, expected %v", true, false)
	}
}
// TestCloseConnection verifies CloseConnection succeeds on an open sqlite
// connection. Also removes the database file it creates so repeated runs
// start clean, matching the cleanup performed by the other tests in this file.
func TestCloseConnection(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	databaseFullLocation := filepath.Join(DB.DataPath, "TestCloseConnection")
	con, err := sql.Open("sqlite3", databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = inst.SetSQLiteConnection(con)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = inst.CloseConnection()
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	// remove the database file created for this test
	err = os.Remove(databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
}
// TestIsConnected verifies IsConnected reflects the state set by SetConnected.
func TestIsConnected(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	inst.SetConnected(true)
	if !inst.IsConnected() {
		t.Errorf("received %v, expected %v", false, true)
	}
	inst.SetConnected(false)
	if inst.IsConnected() {
		t.Errorf("received %v, expected %v", true, false)
	}
}
// TestGetConfig verifies GetConfig returns nil before any config is set and a
// non-nil copy after SetConfig succeeds.
func TestGetConfig(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	cfg := inst.GetConfig()
	if cfg != nil {
		t.Errorf("received %v, expected %v", cfg, nil)
	}
	err := inst.SetConfig(&Config{Enabled: true})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	cfg = inst.GetConfig()
	if cfg == nil {
		t.Errorf("received %v, expected %v", cfg, &Config{Enabled: true})
	}
}
// TestPing exercises Ping on a live sqlite connection, a nil SQL field, a
// disconnected instance, and a nil receiver, then cleans up the test database.
func TestPing(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	databaseFullLocation := filepath.Join(DB.DataPath, "TestPing")
	con, err := sql.Open("sqlite3", databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = inst.SetSQLiteConnection(con)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	inst.SetConnected(true)
	err = inst.Ping()
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	// a connected instance with no SQL handle must report errNilSQL
	inst.SQL = nil
	err = inst.Ping()
	if !errors.Is(err, errNilSQL) {
		t.Errorf("received %v, expected %v", err, errNilSQL)
	}
	inst.SetConnected(false)
	err = inst.Ping()
	if !errors.Is(err, ErrDatabaseNotConnected) {
		t.Errorf("received %v, expected %v", err, ErrDatabaseNotConnected)
	}
	inst = nil
	err = inst.Ping()
	if !errors.Is(err, ErrNilInstance) {
		t.Errorf("received %v, expected %v", err, ErrNilInstance)
	}
	// clean up the database file created for the test
	err = con.Close()
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = os.Remove(databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
}
// TestGetSQL exercises GetSQL with no connection set, a live sqlite
// connection, and a nil receiver. Also closes the connection and removes the
// database file it creates — the original leaked both, unlike TestPing.
func TestGetSQL(t *testing.T) {
	t.Parallel()
	inst := &Instance{}
	_, err := inst.GetSQL()
	if !errors.Is(err, errNilSQL) {
		t.Errorf("received %v, expected %v", err, errNilSQL)
	}
	databaseFullLocation := filepath.Join(DB.DataPath, "TestGetSQL")
	con, err := sql.Open("sqlite3", databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = inst.SetSQLiteConnection(con)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	_, err = inst.GetSQL()
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	inst = nil
	_, err = inst.GetSQL()
	if !errors.Is(err, ErrNilInstance) {
		t.Errorf("received %v, expected %v", err, ErrNilInstance)
	}
	// clean up the connection and database file created for the test
	err = con.Close()
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	err = os.Remove(databaseFullLocation)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
}

View File

@@ -1,6 +1,7 @@
package database
import (
"context"
"database/sql"
"errors"
"path/filepath"
@@ -39,11 +40,15 @@ var (
SupportedDrivers = []string{DBSQLite, DBSQLite3, DBPostgreSQL}
// ErrFailedToConnect for when a database fails to connect
ErrFailedToConnect = errors.New("database failed to connect")
// ErrDatabaseNotConnected for when a database is not connected
ErrDatabaseNotConnected = errors.New("database is not connected")
// DefaultSQLiteDatabase is the default sqlite3 database name to use
DefaultSQLiteDatabase = "gocryptotrader.db"
errNilConfig = errors.New("received nil config")
errNilInstance = errors.New("database instance is nil")
errNilSQL = errors.New("database SQL connection is nil")
// ErrNilInstance for when a database is nil
ErrNilInstance = errors.New("database instance is nil")
errNilConfig = errors.New("received nil config")
errNilSQL = errors.New("database SQL connection is nil")
errFailedPing = errors.New("unable to verify database is connected, failed ping")
)
const (
@@ -56,3 +61,23 @@ const (
// DBInvalidDriver const string for invalid driver
DBInvalidDriver = "invalid driver"
)
// IDatabase allows for the passing of a database struct
// without giving the receiver access to all functionality
type IDatabase interface {
IsConnected() bool
GetSQL() (*sql.DB, error)
GetConfig() *Config
}
// ISQL allows for the passing of a SQL connection
// without giving the receiver access to all functionality
type ISQL interface {
BeginTx(context.Context, *sql.TxOptions) (*sql.Tx, error)
Exec(string, ...interface{}) (sql.Result, error)
Query(string, ...interface{}) (*sql.Rows, error)
QueryRow(string, ...interface{}) *sql.Row
ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
QueryRowContext(context.Context, string, ...interface{}) *sql.Row
}

View File

@@ -10,9 +10,7 @@ import (
)
// Connect opens a connection to Postgres database and returns a pointer to database.DB
func Connect() (*database.Instance, error) {
cfg := database.DB.GetConfig()
func Connect(cfg *database.Config) (*database.Instance, error) {
if cfg.SSLMode == "" {
cfg.SSLMode = "disable"
}

View File

@@ -10,20 +10,21 @@ import (
)
// Connect opens a connection to sqlite database and returns a pointer to database.DB
func Connect() (*database.Instance, error) {
cfg := database.DB.GetConfig()
if cfg.Database == "" {
func Connect(db string) (*database.Instance, error) {
if db == "" {
return nil, database.ErrNoDatabaseProvided
}
databaseFullLocation := filepath.Join(database.DB.DataPath, cfg.Database)
databaseFullLocation := filepath.Join(database.DB.DataPath, db)
dbConn, err := sql.Open("sqlite3", databaseFullLocation)
if err != nil {
return nil, err
}
database.DB.SetSQLiteConnection(dbConn)
err = database.DB.SetSQLiteConnection(dbConn)
if err != nil {
return nil, err
}
return database.DB, nil
}

View File

@@ -0,0 +1,39 @@
-- +goose Up
-- datahistoryjob: one row per configured data history job.
-- NOTE(review): the numeric columns (data_type, interval, request_size,
-- max_retries, batch_count, status) are DOUBLE PRECISION although the README
-- documents small integer values — confirm intended.
CREATE TABLE IF NOT EXISTS datahistoryjob
(
    id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
    nickname varchar(255) NOT NULL,
    -- ties the job to an entry in the exchange table
    exchange_name_id uuid REFERENCES exchange(id) NOT NULL,
    asset varchar NOT NULL,
    base varchar(30) NOT NULL,
    quote varchar(30) NOT NULL,
    start_time TIMESTAMPTZ NOT NULL,
    end_time TIMESTAMPTZ NOT NULL,
    data_type DOUBLE PRECISION NOT NULL,
    interval DOUBLE PRECISION NOT NULL,
    request_size DOUBLE PRECISION NOT NULL,
    max_retries DOUBLE PRECISION NOT NULL,
    batch_count DOUBLE PRECISION NOT NULL,
    status DOUBLE PRECISION NOT NULL,
    created TIMESTAMPTZ NOT NULL,
    CONSTRAINT uniquenickname
        unique(nickname),
    CONSTRAINT uniqueid
        unique(id)
);
-- datahistoryjobresult: one row per processed interval of a job.
CREATE TABLE IF NOT EXISTS datahistoryjobresult
(
    id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
    -- ON DELETE RESTRICT prevents deleting a job that still has results
    job_id uuid NOT NULL REFERENCES datahistoryjob(id) ON DELETE RESTRICT,
    result TEXT NULL,
    status DOUBLE PRECISION NOT NULL,
    interval_start_time TIMESTAMPTZ NOT NULL,
    interval_end_time TIMESTAMPTZ NOT NULL,
    run_time TIMESTAMPTZ NOT NULL
);
-- +goose Down
-- drop the referencing table first to satisfy the foreign key
DROP TABLE datahistoryjobresult;
DROP TABLE datahistoryjob;

View File

@@ -0,0 +1,40 @@
-- +goose Up
-- datahistoryjob: one row per configured data-history retrieval job.
CREATE TABLE datahistoryjob
(
    id text NOT NULL primary key,
    nickname text NOT NULL,
    exchange_name_id text NOT NULL,
    asset text NOT NULL,
    base text NOT NULL,
    quote text NOT NULL,
    start_time timestamp NOT NULL,
    end_time timestamp NOT NULL,
    interval real NOT NULL,
    data_type real NOT NULL,
    request_size real NOT NULL,
    max_retries real NOT NULL,
    batch_count real NOT NULL,
    status real NOT NULL,
    created timestamp NOT NULL default CURRENT_TIMESTAMP,
    FOREIGN KEY(exchange_name_id) REFERENCES exchange(id) ON DELETE RESTRICT,
    -- ON CONFLICT REPLACE gives upsert-like behaviour on id/nickname clashes.
    UNIQUE(id) ON CONFLICT REPLACE,
    UNIQUE(nickname) ON CONFLICT REPLACE
);
-- datahistoryjobresult: one row per processed interval of a job.
CREATE TABLE datahistoryjobresult
(
    id text not null primary key,
    job_id text NOT NULL,
    result text NULL,
    status real NOT NULL,
    interval_start_time timestamp NOT NULL,
    interval_end_time timestamp NOT NULL,
    run_time timestamp NOT NULL default CURRENT_TIMESTAMP,
    UNIQUE(id) ON CONFLICT REPLACE,
    -- RESTRICT prevents deleting a job that still has recorded results.
    FOREIGN KEY(job_id) REFERENCES datahistoryjob(id) ON DELETE RESTRICT
);
-- +goose Down
-- Drop the child table first: with PRAGMA foreign_keys enforcement enabled,
-- dropping datahistoryjob while datahistoryjobresult rows still reference it
-- would fail. This also matches the postgres migration's drop order.
DROP TABLE datahistoryjobresult;
DROP TABLE datahistoryjob;

View File

@@ -4,23 +4,27 @@
package postgres
// TableNames holds the physical database table name for every table the
// models were generated against, keyed by the model's Go identifier.
// Resolved from the diff to the post-change form, which adds the
// Datahistoryjob and Datahistoryjobresult entries.
var TableNames = struct {
	AuditEvent           string
	Candle               string
	Datahistoryjob       string
	Datahistoryjobresult string
	Exchange             string
	Script               string
	ScriptExecution      string
	Trade                string
	WithdrawalCrypto     string
	WithdrawalFiat       string
	WithdrawalHistory    string
}{
	AuditEvent:           "audit_event",
	Candle:               "candle",
	Datahistoryjob:       "datahistoryjob",
	Datahistoryjobresult: "datahistoryjobresult",
	Exchange:             "exchange",
	Script:               "script",
	ScriptExecution:      "script_execution",
	Trade:                "trade",
	WithdrawalCrypto:     "withdrawal_crypto",
	WithdrawalFiat:       "withdrawal_fiat",
	WithdrawalHistory:    "withdrawal_history",
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,994 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
// testDatahistoryjobs verifies the generated query constructor returns a
// query with a non-nil underlying Query value.
func testDatahistoryjobs(t *testing.T) {
t.Parallel()
query := Datahistoryjobs()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
func testDatahistoryjobsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := Datahistoryjobs().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsExists(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
e, err := DatahistoryjobExists(ctx, tx, o.ID)
if err != nil {
t.Errorf("Unable to check if Datahistoryjob exists: %s", err)
}
if !e {
t.Errorf("Expected DatahistoryjobExists to return true, but got false.")
}
}
func testDatahistoryjobsFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
datahistoryjobFound, err := FindDatahistoryjob(ctx, tx, o.ID)
if err != nil {
t.Error(err)
}
if datahistoryjobFound == nil {
t.Error("want a record, got nil")
}
}
func testDatahistoryjobsBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = Datahistoryjobs().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
func testDatahistoryjobsOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := Datahistoryjobs().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
func testDatahistoryjobsAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
datahistoryjobOne := &Datahistoryjob{}
datahistoryjobTwo := &Datahistoryjob{}
if err = randomize.Struct(seed, datahistoryjobOne, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobTwo, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobs().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 2 {
t.Error("want 2 records, got:", len(slice))
}
}
func testDatahistoryjobsCount(t *testing.T) {
t.Parallel()
var err error
seed := randomize.NewSeed()
datahistoryjobOne := &Datahistoryjob{}
datahistoryjobTwo := &Datahistoryjob{}
if err = randomize.Struct(seed, datahistoryjobOne, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobTwo, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 2 {
t.Error("want 2 records, got:", count)
}
}
func datahistoryjobBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func datahistoryjobAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
*o = Datahistoryjob{}
return nil
}
func testDatahistoryjobsHooks(t *testing.T) {
t.Parallel()
var err error
ctx := context.Background()
empty := &Datahistoryjob{}
o := &Datahistoryjob{}
seed := randomize.NewSeed()
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, false); err != nil {
t.Errorf("Unable to randomize Datahistoryjob object: %s", err)
}
AddDatahistoryjobHook(boil.BeforeInsertHook, datahistoryjobBeforeInsertHook)
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeInsertHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterInsertHook, datahistoryjobAfterInsertHook)
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterInsertHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterSelectHook, datahistoryjobAfterSelectHook)
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterSelectHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.BeforeUpdateHook, datahistoryjobBeforeUpdateHook)
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeUpdateHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterUpdateHook, datahistoryjobAfterUpdateHook)
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterUpdateHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.BeforeDeleteHook, datahistoryjobBeforeDeleteHook)
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeDeleteHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterDeleteHook, datahistoryjobAfterDeleteHook)
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterDeleteHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.BeforeUpsertHook, datahistoryjobBeforeUpsertHook)
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobBeforeUpsertHooks = []DatahistoryjobHook{}
AddDatahistoryjobHook(boil.AfterUpsertHook, datahistoryjobAfterUpsertHook)
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobAfterUpsertHooks = []DatahistoryjobHook{}
}
func testDatahistoryjobsInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testDatahistoryjobsInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(datahistoryjobColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testDatahistoryjobToManyJobDatahistoryjobresults(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjob
var b, c Datahistoryjobresult
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Fatal(err)
}
b.JobID = a.ID
c.JobID = a.ID
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
check, err := a.JobDatahistoryjobresults().All(ctx, tx)
if err != nil {
t.Fatal(err)
}
bFound, cFound := false, false
for _, v := range check {
if v.JobID == b.JobID {
bFound = true
}
if v.JobID == c.JobID {
cFound = true
}
}
if !bFound {
t.Error("expected to find b")
}
if !cFound {
t.Error("expected to find c")
}
slice := DatahistoryjobSlice{&a}
if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.JobDatahistoryjobresults); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
a.R.JobDatahistoryjobresults = nil
if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, true, &a, nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.JobDatahistoryjobresults); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
if t.Failed() {
t.Logf("%#v", check)
}
}
func testDatahistoryjobToManyAddOpJobDatahistoryjobresults(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjob
var b, c, d, e Datahistoryjobresult
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
foreigners := []*Datahistoryjobresult{&b, &c, &d, &e}
for _, x := range foreigners {
if err = randomize.Struct(seed, x, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
foreignersSplitByInsertion := [][]*Datahistoryjobresult{
{&b, &c},
{&d, &e},
}
for i, x := range foreignersSplitByInsertion {
err = a.AddJobDatahistoryjobresults(ctx, tx, i != 0, x...)
if err != nil {
t.Fatal(err)
}
first := x[0]
second := x[1]
if a.ID != first.JobID {
t.Error("foreign key was wrong value", a.ID, first.JobID)
}
if a.ID != second.JobID {
t.Error("foreign key was wrong value", a.ID, second.JobID)
}
if first.R.Job != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if second.R.Job != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if a.R.JobDatahistoryjobresults[i*2] != first {
t.Error("relationship struct slice not set to correct value")
}
if a.R.JobDatahistoryjobresults[i*2+1] != second {
t.Error("relationship struct slice not set to correct value")
}
count, err := a.JobDatahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Fatal(err)
}
if want := int64((i + 1) * 2); count != want {
t.Error("want", want, "got", count)
}
}
}
func testDatahistoryjobToOneExchangeUsingExchangeName(t *testing.T) {
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var local Datahistoryjob
var foreign Exchange
seed := randomize.NewSeed()
if err := randomize.Struct(seed, &local, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err := randomize.Struct(seed, &foreign, exchangeDBTypes, false, exchangeColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Exchange struct: %s", err)
}
if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
local.ExchangeNameID = foreign.ID
if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
check, err := local.ExchangeName().One(ctx, tx)
if err != nil {
t.Fatal(err)
}
if check.ID != foreign.ID {
t.Errorf("want: %v, got %v", foreign.ID, check.ID)
}
slice := DatahistoryjobSlice{&local}
if err = local.L.LoadExchangeName(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
t.Fatal(err)
}
if local.R.ExchangeName == nil {
t.Error("struct should have been eager loaded")
}
local.R.ExchangeName = nil
if err = local.L.LoadExchangeName(ctx, tx, true, &local, nil); err != nil {
t.Fatal(err)
}
if local.R.ExchangeName == nil {
t.Error("struct should have been eager loaded")
}
}
func testDatahistoryjobToOneSetOpExchangeUsingExchangeName(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjob
var b, c Exchange
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
for i, x := range []*Exchange{&b, &c} {
err = a.SetExchangeName(ctx, tx, i != 0, x)
if err != nil {
t.Fatal(err)
}
if a.R.ExchangeName != x {
t.Error("relationship struct not set to correct value")
}
if x.R.ExchangeNameDatahistoryjobs[0] != &a {
t.Error("failed to append to foreign relationship struct")
}
if a.ExchangeNameID != x.ID {
t.Error("foreign key was wrong value", a.ExchangeNameID)
}
zero := reflect.Zero(reflect.TypeOf(a.ExchangeNameID))
reflect.Indirect(reflect.ValueOf(&a.ExchangeNameID)).Set(zero)
if err = a.Reload(ctx, tx); err != nil {
t.Fatal("failed to reload", err)
}
if a.ExchangeNameID != x.ID {
t.Error("foreign key was wrong value", a.ExchangeNameID, x.ID)
}
}
}
func testDatahistoryjobsReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
func testDatahistoryjobsReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
func testDatahistoryjobsSelect(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobs().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 1 {
t.Error("want one record, got:", len(slice))
}
}
var (
// datahistoryjobDBTypes maps each Datahistoryjob struct field to its
// postgres column type, used by randomize.Struct to generate fixtures.
datahistoryjobDBTypes = map[string]string{`ID`: `uuid`, `Nickname`: `character varying`, `ExchangeNameID`: `uuid`, `Asset`: `character varying`, `Base`: `character varying`, `Quote`: `character varying`, `StartTime`: `timestamp with time zone`, `EndTime`: `timestamp with time zone`, `DataType`: `double precision`, `Interval`: `double precision`, `RequestSize`: `double precision`, `MaxRetries`: `double precision`, `BatchCount`: `double precision`, `Status`: `double precision`, `Created`: `timestamp with time zone`}
// Keep the bytes import referenced even when unused by generated code.
_ = bytes.MinRead
)
func testDatahistoryjobsUpdate(t *testing.T) {
t.Parallel()
if 0 == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with no primary key columns")
}
if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only affect one row but affected", rowsAff)
}
}
func testDatahistoryjobsSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(datahistoryjobAllColumns, datahistoryjobPrimaryKeyColumns) {
fields = datahistoryjobAllColumns
} else {
fields = strmangle.SetComplement(
datahistoryjobAllColumns,
datahistoryjobPrimaryKeyColumns,
)
}
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := DatahistoryjobSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}
func testDatahistoryjobsUpsert(t *testing.T) {
t.Parallel()
if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
// Attempt the INSERT side of an UPSERT
o := Datahistoryjob{}
if err = randomize.Struct(seed, &o, datahistoryjobDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert Datahistoryjob: %s", err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Attempt the UPDATE side of an UPSERT
if err = randomize.Struct(seed, &o, datahistoryjobDBTypes, false, datahistoryjobPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
t.Errorf("Unable to upsert Datahistoryjob: %s", err)
}
count, err = Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,841 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
// testDatahistoryjobresults verifies the generated query constructor returns
// a query with a non-nil underlying Query value.
func testDatahistoryjobresults(t *testing.T) {
t.Parallel()
query := Datahistoryjobresults()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
func testDatahistoryjobresultsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobresultsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := Datahistoryjobresults().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobresultsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobresultSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobresultsExists inserts a randomized row and checks the
// generated existence helper finds it by primary key.
func testDatahistoryjobresultsExists(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	e, err := DatahistoryjobresultExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if Datahistoryjobresult exists: %s", err)
	}
	if !e {
		// t.Error rather than t.Errorf: the message contains no format verbs.
		t.Error("Expected DatahistoryjobresultExists to return true, but got false.")
	}
}
func testDatahistoryjobresultsFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
datahistoryjobresultFound, err := FindDatahistoryjobresult(ctx, tx, o.ID)
if err != nil {
t.Error(err)
}
if datahistoryjobresultFound == nil {
t.Error("want a record, got nil")
}
}
func testDatahistoryjobresultsBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = Datahistoryjobresults().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
func testDatahistoryjobresultsOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := Datahistoryjobresults().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
// testDatahistoryjobresultsAll inserts two randomized rows and verifies All
// returns both of them.
func testDatahistoryjobresultsAll(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	records := []*Datahistoryjobresult{{}, {}}
	for _, rec := range records {
		if err := randomize.Struct(seed, rec, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
		}
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	for _, rec := range records {
		if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}
	all, err := Datahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(all) != 2 {
		t.Error("want 2 records, got:", len(all))
	}
}
// testDatahistoryjobresultsCount inserts two randomized rows and verifies
// Count reports both of them.
func testDatahistoryjobresultsCount(t *testing.T) {
	t.Parallel()
	seed := randomize.NewSeed()
	records := []*Datahistoryjobresult{{}, {}}
	for _, rec := range records {
		if err := randomize.Struct(seed, rec, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
		}
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	for _, rec := range records {
		if err := rec.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}
	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// The hook probes below each zero out the record they receive. The hooks test
// registers one at a time and uses the zeroed struct as observable proof that
// the corresponding lifecycle hook actually fired.
func datahistoryjobresultBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
// testDatahistoryjobresultsHooks registers each hook probe in turn, runs the
// matching do*Hooks dispatcher with a nil executor, and checks the probe's
// side effect (zeroing the struct) took place. After each check the
// package-level hook slice is reset so later hooks run in isolation.
//
// NOTE(review): this test mutates package-level hook slices while running
// under t.Parallel(); a concurrently running Insert in a sibling test could
// observe a transiently registered hook — verify the generator's intent
// before relying on parallel execution here.
func testDatahistoryjobresultsHooks(t *testing.T) {
t.Parallel()
var err error
ctx := context.Background()
empty := &Datahistoryjobresult{}
o := &Datahistoryjobresult{}
seed := randomize.NewSeed()
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, false); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult object: %s", err)
}
// BeforeInsert
AddDatahistoryjobresultHook(boil.BeforeInsertHook, datahistoryjobresultBeforeInsertHook)
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeInsertHooks = []DatahistoryjobresultHook{}
// AfterInsert
AddDatahistoryjobresultHook(boil.AfterInsertHook, datahistoryjobresultAfterInsertHook)
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterInsertHooks = []DatahistoryjobresultHook{}
// AfterSelect
AddDatahistoryjobresultHook(boil.AfterSelectHook, datahistoryjobresultAfterSelectHook)
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterSelectHooks = []DatahistoryjobresultHook{}
// BeforeUpdate
AddDatahistoryjobresultHook(boil.BeforeUpdateHook, datahistoryjobresultBeforeUpdateHook)
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeUpdateHooks = []DatahistoryjobresultHook{}
// AfterUpdate
AddDatahistoryjobresultHook(boil.AfterUpdateHook, datahistoryjobresultAfterUpdateHook)
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterUpdateHooks = []DatahistoryjobresultHook{}
// BeforeDelete
AddDatahistoryjobresultHook(boil.BeforeDeleteHook, datahistoryjobresultBeforeDeleteHook)
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeDeleteHooks = []DatahistoryjobresultHook{}
// AfterDelete
AddDatahistoryjobresultHook(boil.AfterDeleteHook, datahistoryjobresultAfterDeleteHook)
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterDeleteHooks = []DatahistoryjobresultHook{}
// BeforeUpsert
AddDatahistoryjobresultHook(boil.BeforeUpsertHook, datahistoryjobresultBeforeUpsertHook)
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeUpsertHooks = []DatahistoryjobresultHook{}
// AfterUpsert
AddDatahistoryjobresultHook(boil.AfterUpsertHook, datahistoryjobresultAfterUpsertHook)
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterUpsertHooks = []DatahistoryjobresultHook{}
}
func testDatahistoryjobresultsInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testDatahistoryjobresultsInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(datahistoryjobresultColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
// testDatahistoryjobresultToOneDatahistoryjobUsingJob verifies the to-one
// relationship from a job result to its parent Datahistoryjob: the generated
// Job() query, plus LoadJob in both slice (eager) and singular form.
func testDatahistoryjobresultToOneDatahistoryjobUsingJob(t *testing.T) {
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var local Datahistoryjobresult
var foreign Datahistoryjob
seed := randomize.NewSeed()
if err := randomize.Struct(seed, &local, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if err := randomize.Struct(seed, &foreign, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
// Insert the parent first so the FK below points at a real row.
if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
local.JobID = foreign.ID
if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
// Relationship query: local.Job() should resolve to the inserted parent.
check, err := local.Job().One(ctx, tx)
if err != nil {
t.Fatal(err)
}
if check.ID != foreign.ID {
t.Errorf("want: %v, got %v", foreign.ID, check.ID)
}
// Eager load via a slice (singular=false).
slice := DatahistoryjobresultSlice{&local}
if err = local.L.LoadJob(ctx, tx, false, (*[]*Datahistoryjobresult)(&slice), nil); err != nil {
t.Fatal(err)
}
if local.R.Job == nil {
t.Error("struct should have been eager loaded")
}
// Clear and eager load again via the singular path (singular=true).
local.R.Job = nil
if err = local.L.LoadJob(ctx, tx, true, &local, nil); err != nil {
t.Fatal(err)
}
if local.R.Job == nil {
t.Error("struct should have been eager loaded")
}
}
// testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob verifies SetJob
// wires the foreign key and both relationship structs, inserting the related
// Datahistoryjob itself on the second pass.
func testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjobresult
	var b, c Datahistoryjob
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	// Plain assignment here: `if err := a.Insert` would shadow the outer err
	// declared above, unlike every sibling insert in this function.
	if err = a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// Pass 1 sets b (already inserted); pass 2 asks SetJob to insert c.
	for i, x := range []*Datahistoryjob{&b, &c} {
		err = a.SetJob(ctx, tx, i != 0, x)
		if err != nil {
			t.Fatal(err)
		}
		if a.R.Job != x {
			t.Error("relationship struct not set to correct value")
		}
		if x.R.JobDatahistoryjobresults[0] != &a {
			t.Error("failed to append to foreign relationship struct")
		}
		if a.JobID != x.ID {
			t.Error("foreign key was wrong value", a.JobID)
		}
		// Zero the FK only in memory, then reload to prove it was persisted.
		zero := reflect.Zero(reflect.TypeOf(a.JobID))
		reflect.Indirect(reflect.ValueOf(&a.JobID)).Set(zero)
		if err = a.Reload(ctx, tx); err != nil {
			t.Fatal("failed to reload", err)
		}
		if a.JobID != x.ID {
			t.Error("foreign key was wrong value", a.JobID, x.ID)
		}
	}
}
func testDatahistoryjobresultsReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
func testDatahistoryjobresultsReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobresultSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
func testDatahistoryjobresultsSelect(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobresults().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 1 {
t.Error("want one record, got:", len(slice))
}
}
var (
// datahistoryjobresultDBTypes maps struct field names to their database
// column types; randomize.Struct consults it to produce type-appropriate
// random values for each column.
datahistoryjobresultDBTypes = map[string]string{`ID`: `uuid`, `JobID`: `uuid`, `Result`: `text`, `Status`: `double precision`, `IntervalStartTime`: `timestamp with time zone`, `IntervalEndTime`: `timestamp with time zone`, `RunTime`: `timestamp with time zone`}
// Keeps the bytes import referenced even when no test in this file uses it.
_ = bytes.MinRead
)
// testDatahistoryjobresultsUpdate inserts a single randomized row,
// re-randomizes every non-primary-key column, and verifies Update touches
// exactly one row.
func testDatahistoryjobresultsUpdate(t *testing.T) {
	t.Parallel()
	// len(...) == 0 replaces the generated Yoda form (0 == len(...)), which
	// is unidiomatic Go.
	if len(datahistoryjobresultPrimaryKeyColumns) == 0 {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var err error
	o := &Datahistoryjobresult{}
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	count, err := Datahistoryjobresults().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 1 {
		t.Error("want one record, got:", count)
	}
	// Randomize everything except the primary key so the UPDATE still targets
	// the row inserted above.
	if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
// testDatahistoryjobresultsSliceUpdateAll inserts one randomized row,
// re-randomizes its non-primary-key columns, then applies the new values via
// slice.UpdateAll using a column->value map built by reflection from the
// struct's `boil` field tags.
func testDatahistoryjobresultsSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Randomize only the non-primary-key columns so the update targets the
// same row.
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(datahistoryjobresultAllColumns, datahistoryjobresultPrimaryKeyColumns) {
fields = datahistoryjobresultAllColumns
} else {
fields = strmangle.SetComplement(
datahistoryjobresultAllColumns,
datahistoryjobresultPrimaryKeyColumns,
)
}
// Build the update map by matching each remaining column name against the
// struct fields' `boil` tags via reflection.
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := DatahistoryjobresultSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}
// testDatahistoryjobresultsUpsert exercises both halves of an upsert: first an
// insert of a fresh row, then an update of the same row, checking the table
// holds exactly one record after each step.
func testDatahistoryjobresultsUpsert(t *testing.T) {
	t.Parallel()
	if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}
	seed := randomize.NewSeed()
	var record Datahistoryjobresult
	if err := randomize.Struct(seed, &record, datahistoryjobresultDBTypes, true); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	expectOne := func() {
		count, err := Datahistoryjobresults().Count(ctx, tx)
		if err != nil {
			t.Error(err)
		}
		if count != 1 {
			t.Error("want one record, got:", count)
		}
	}
	// INSERT half of the upsert.
	if err := record.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil {
		t.Errorf("Unable to upsert Datahistoryjobresult: %s", err)
	}
	expectOne()
	// UPDATE half: keep the primary key, re-randomize everything else.
	if err := randomize.Struct(seed, &record, datahistoryjobresultDBTypes, false, datahistoryjobresultPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
	}
	if err := record.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil {
		t.Errorf("Unable to upsert Datahistoryjobresult: %s", err)
	}
	expectOne()
}

View File

@@ -51,10 +51,12 @@ var ExchangeWhere = struct {
// ExchangeRels is where relationship names are stored.
var ExchangeRels = struct {
ExchangeNameCandles string
ExchangeNameDatahistoryjobs string
ExchangeNameTrades string
ExchangeNameWithdrawalHistories string
}{
ExchangeNameCandles: "ExchangeNameCandles",
ExchangeNameDatahistoryjobs: "ExchangeNameDatahistoryjobs",
ExchangeNameTrades: "ExchangeNameTrades",
ExchangeNameWithdrawalHistories: "ExchangeNameWithdrawalHistories",
}
@@ -62,6 +64,7 @@ var ExchangeRels = struct {
// exchangeR is where relationships are stored.
type exchangeR struct {
ExchangeNameCandles CandleSlice
ExchangeNameDatahistoryjobs DatahistoryjobSlice
ExchangeNameTrades TradeSlice
ExchangeNameWithdrawalHistories WithdrawalHistorySlice
}
@@ -377,6 +380,27 @@ func (o *Exchange) ExchangeNameCandles(mods ...qm.QueryMod) candleQuery {
return query
}
// ExchangeNameDatahistoryjobs retrieves all the datahistoryjob's Datahistoryjobs with an executor via exchange_name_id column.
func (o *Exchange) ExchangeNameDatahistoryjobs(mods ...qm.QueryMod) datahistoryjobQuery {
var queryMods []qm.QueryMod
if len(mods) != 0 {
queryMods = append(queryMods, mods...)
}
queryMods = append(queryMods,
qm.Where("\"datahistoryjob\".\"exchange_name_id\"=?", o.ID),
)
query := Datahistoryjobs(queryMods...)
queries.SetFrom(query.Query, "\"datahistoryjob\"")
if len(queries.GetSelect(query.Query)) == 0 {
queries.SetSelect(query.Query, []string{"\"datahistoryjob\".*"})
}
return query
}
// ExchangeNameTrades retrieves all the trade's Trades with an executor via exchange_name_id column.
func (o *Exchange) ExchangeNameTrades(mods ...qm.QueryMod) tradeQuery {
var queryMods []qm.QueryMod
@@ -514,6 +538,101 @@ func (exchangeL) LoadExchangeNameCandles(ctx context.Context, e boil.ContextExec
return nil
}
// LoadExchangeNameDatahistoryjobs allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
func (exchangeL) LoadExchangeNameDatahistoryjobs(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
var slice []*Exchange
var object *Exchange
if singular {
object = maybeExchange.(*Exchange)
} else {
slice = *maybeExchange.(*[]*Exchange)
}
args := make([]interface{}, 0, 1)
if singular {
if object.R == nil {
object.R = &exchangeR{}
}
args = append(args, object.ID)
} else {
Outer:
for _, obj := range slice {
if obj.R == nil {
obj.R = &exchangeR{}
}
for _, a := range args {
if a == obj.ID {
continue Outer
}
}
args = append(args, obj.ID)
}
}
if len(args) == 0 {
return nil
}
query := NewQuery(qm.From(`datahistoryjob`), qm.WhereIn(`datahistoryjob.exchange_name_id in ?`, args...))
if mods != nil {
mods.Apply(query)
}
results, err := query.QueryContext(ctx, e)
if err != nil {
return errors.Wrap(err, "failed to eager load datahistoryjob")
}
var resultSlice []*Datahistoryjob
if err = queries.Bind(results, &resultSlice); err != nil {
return errors.Wrap(err, "failed to bind eager loaded slice datahistoryjob")
}
if err = results.Close(); err != nil {
return errors.Wrap(err, "failed to close results in eager load on datahistoryjob")
}
if err = results.Err(); err != nil {
return errors.Wrap(err, "error occurred during iteration of eager loaded relations for datahistoryjob")
}
if len(datahistoryjobAfterSelectHooks) != 0 {
for _, obj := range resultSlice {
if err := obj.doAfterSelectHooks(ctx, e); err != nil {
return err
}
}
}
if singular {
object.R.ExchangeNameDatahistoryjobs = resultSlice
for _, foreign := range resultSlice {
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.ExchangeName = object
}
return nil
}
for _, foreign := range resultSlice {
for _, local := range slice {
if local.ID == foreign.ExchangeNameID {
local.R.ExchangeNameDatahistoryjobs = append(local.R.ExchangeNameDatahistoryjobs, foreign)
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.ExchangeName = local
break
}
}
}
return nil
}
// LoadExchangeNameTrades allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
func (exchangeL) LoadExchangeNameTrades(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
@@ -757,6 +876,59 @@ func (o *Exchange) AddExchangeNameCandles(ctx context.Context, exec boil.Context
return nil
}
// AddExchangeNameDatahistoryjobs adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameDatahistoryjobs.
// Sets related.R.ExchangeName appropriately.
func (o *Exchange) AddExchangeNameDatahistoryjobs(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Datahistoryjob) error {
var err error
for _, rel := range related {
if insert {
rel.ExchangeNameID = o.ID
if err = rel.Insert(ctx, exec, boil.Infer()); err != nil {
return errors.Wrap(err, "failed to insert into foreign table")
}
} else {
updateQuery := fmt.Sprintf(
"UPDATE \"datahistoryjob\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 1, []string{"exchange_name_id"}),
strmangle.WhereClause("\"", "\"", 2, datahistoryjobPrimaryKeyColumns),
)
values := []interface{}{o.ID, rel.ID}
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, updateQuery)
fmt.Fprintln(boil.DebugWriter, values)
}
if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
return errors.Wrap(err, "failed to update foreign table")
}
rel.ExchangeNameID = o.ID
}
}
if o.R == nil {
o.R = &exchangeR{
ExchangeNameDatahistoryjobs: related,
}
} else {
o.R.ExchangeNameDatahistoryjobs = append(o.R.ExchangeNameDatahistoryjobs, related...)
}
for _, rel := range related {
if rel.R == nil {
rel.R = &datahistoryjobR{
ExchangeName: o,
}
} else {
rel.R.ExchangeName = o
}
}
return nil
}
// AddExchangeNameTrades adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameTrades.

View File

@@ -572,6 +572,84 @@ func testExchangeToManyExchangeNameCandles(t *testing.T) {
}
}
func testExchangeToManyExchangeNameDatahistoryjobs(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Exchange
var b, c Datahistoryjob
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, exchangeDBTypes, true, exchangeColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Exchange struct: %s", err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Fatal(err)
}
b.ExchangeNameID = a.ID
c.ExchangeNameID = a.ID
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
check, err := a.ExchangeNameDatahistoryjobs().All(ctx, tx)
if err != nil {
t.Fatal(err)
}
bFound, cFound := false, false
for _, v := range check {
if v.ExchangeNameID == b.ExchangeNameID {
bFound = true
}
if v.ExchangeNameID == c.ExchangeNameID {
cFound = true
}
}
if !bFound {
t.Error("expected to find b")
}
if !cFound {
t.Error("expected to find c")
}
slice := ExchangeSlice{&a}
if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, false, (*[]*Exchange)(&slice), nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
a.R.ExchangeNameDatahistoryjobs = nil
if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, true, &a, nil); err != nil {
t.Fatal(err)
}
if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
t.Error("number of eager loaded records wrong, got:", got)
}
if t.Failed() {
t.Logf("%#v", check)
}
}
func testExchangeToManyExchangeNameTrades(t *testing.T) {
var err error
ctx := context.Background()
@@ -803,6 +881,81 @@ func testExchangeToManyAddOpExchangeNameCandles(t *testing.T) {
}
}
}
func testExchangeToManyAddOpExchangeNameDatahistoryjobs(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Exchange
var b, c, d, e Datahistoryjob
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
foreigners := []*Datahistoryjob{&b, &c, &d, &e}
for _, x := range foreigners {
if err = randomize.Struct(seed, x, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
foreignersSplitByInsertion := [][]*Datahistoryjob{
{&b, &c},
{&d, &e},
}
for i, x := range foreignersSplitByInsertion {
err = a.AddExchangeNameDatahistoryjobs(ctx, tx, i != 0, x...)
if err != nil {
t.Fatal(err)
}
first := x[0]
second := x[1]
if a.ID != first.ExchangeNameID {
t.Error("foreign key was wrong value", a.ID, first.ExchangeNameID)
}
if a.ID != second.ExchangeNameID {
t.Error("foreign key was wrong value", a.ID, second.ExchangeNameID)
}
if first.R.ExchangeName != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if second.R.ExchangeName != &a {
t.Error("relationship was not added properly to the foreign slice")
}
if a.R.ExchangeNameDatahistoryjobs[i*2] != first {
t.Error("relationship struct slice not set to correct value")
}
if a.R.ExchangeNameDatahistoryjobs[i*2+1] != second {
t.Error("relationship struct slice not set to correct value")
}
count, err := a.ExchangeNameDatahistoryjobs().Count(ctx, tx)
if err != nil {
t.Fatal(err)
}
if want := int64((i + 1) * 2); count != want {
t.Error("want", want, "got", count)
}
}
}
func testExchangeToManyAddOpExchangeNameTrades(t *testing.T) {
var err error

View File

@@ -50,29 +50,6 @@ var ScriptExecutionColumns = struct {
// Generated where
type whereHelpernull_String struct{ field string }
func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, true, x)
}
func (w whereHelpernull_String) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
func (w whereHelpernull_String) LT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_String) GT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GTE, x)
}
var ScriptExecutionWhere = struct {
ID whereHelperstring
ScriptID whereHelpernull_String

View File

@@ -14,6 +14,8 @@ import "testing"
func TestParent(t *testing.T) {
t.Run("AuditEvents", testAuditEvents)
t.Run("Candles", testCandles)
t.Run("Datahistoryjobs", testDatahistoryjobs)
t.Run("Datahistoryjobresults", testDatahistoryjobresults)
t.Run("Exchanges", testExchanges)
t.Run("Scripts", testScripts)
t.Run("ScriptExecutions", testScriptExecutions)
@@ -26,6 +28,8 @@ func TestParent(t *testing.T) {
func TestDelete(t *testing.T) {
t.Run("AuditEvents", testAuditEventsDelete)
t.Run("Candles", testCandlesDelete)
t.Run("Datahistoryjobs", testDatahistoryjobsDelete)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsDelete)
t.Run("Exchanges", testExchangesDelete)
t.Run("Scripts", testScriptsDelete)
t.Run("ScriptExecutions", testScriptExecutionsDelete)
@@ -38,6 +42,8 @@ func TestDelete(t *testing.T) {
func TestQueryDeleteAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsQueryDeleteAll)
t.Run("Candles", testCandlesQueryDeleteAll)
t.Run("Datahistoryjobs", testDatahistoryjobsQueryDeleteAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsQueryDeleteAll)
t.Run("Exchanges", testExchangesQueryDeleteAll)
t.Run("Scripts", testScriptsQueryDeleteAll)
t.Run("ScriptExecutions", testScriptExecutionsQueryDeleteAll)
@@ -50,6 +56,8 @@ func TestQueryDeleteAll(t *testing.T) {
func TestSliceDeleteAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsSliceDeleteAll)
t.Run("Candles", testCandlesSliceDeleteAll)
t.Run("Datahistoryjobs", testDatahistoryjobsSliceDeleteAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsSliceDeleteAll)
t.Run("Exchanges", testExchangesSliceDeleteAll)
t.Run("Scripts", testScriptsSliceDeleteAll)
t.Run("ScriptExecutions", testScriptExecutionsSliceDeleteAll)
@@ -62,6 +70,8 @@ func TestSliceDeleteAll(t *testing.T) {
func TestExists(t *testing.T) {
t.Run("AuditEvents", testAuditEventsExists)
t.Run("Candles", testCandlesExists)
t.Run("Datahistoryjobs", testDatahistoryjobsExists)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsExists)
t.Run("Exchanges", testExchangesExists)
t.Run("Scripts", testScriptsExists)
t.Run("ScriptExecutions", testScriptExecutionsExists)
@@ -74,6 +84,8 @@ func TestExists(t *testing.T) {
func TestFind(t *testing.T) {
t.Run("AuditEvents", testAuditEventsFind)
t.Run("Candles", testCandlesFind)
t.Run("Datahistoryjobs", testDatahistoryjobsFind)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsFind)
t.Run("Exchanges", testExchangesFind)
t.Run("Scripts", testScriptsFind)
t.Run("ScriptExecutions", testScriptExecutionsFind)
@@ -86,6 +98,8 @@ func TestFind(t *testing.T) {
func TestBind(t *testing.T) {
t.Run("AuditEvents", testAuditEventsBind)
t.Run("Candles", testCandlesBind)
t.Run("Datahistoryjobs", testDatahistoryjobsBind)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsBind)
t.Run("Exchanges", testExchangesBind)
t.Run("Scripts", testScriptsBind)
t.Run("ScriptExecutions", testScriptExecutionsBind)
@@ -98,6 +112,8 @@ func TestBind(t *testing.T) {
func TestOne(t *testing.T) {
t.Run("AuditEvents", testAuditEventsOne)
t.Run("Candles", testCandlesOne)
t.Run("Datahistoryjobs", testDatahistoryjobsOne)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsOne)
t.Run("Exchanges", testExchangesOne)
t.Run("Scripts", testScriptsOne)
t.Run("ScriptExecutions", testScriptExecutionsOne)
@@ -110,6 +126,8 @@ func TestOne(t *testing.T) {
func TestAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsAll)
t.Run("Candles", testCandlesAll)
t.Run("Datahistoryjobs", testDatahistoryjobsAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsAll)
t.Run("Exchanges", testExchangesAll)
t.Run("Scripts", testScriptsAll)
t.Run("ScriptExecutions", testScriptExecutionsAll)
@@ -122,6 +140,8 @@ func TestAll(t *testing.T) {
func TestCount(t *testing.T) {
t.Run("AuditEvents", testAuditEventsCount)
t.Run("Candles", testCandlesCount)
t.Run("Datahistoryjobs", testDatahistoryjobsCount)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsCount)
t.Run("Exchanges", testExchangesCount)
t.Run("Scripts", testScriptsCount)
t.Run("ScriptExecutions", testScriptExecutionsCount)
@@ -134,6 +154,8 @@ func TestCount(t *testing.T) {
func TestHooks(t *testing.T) {
t.Run("AuditEvents", testAuditEventsHooks)
t.Run("Candles", testCandlesHooks)
t.Run("Datahistoryjobs", testDatahistoryjobsHooks)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsHooks)
t.Run("Exchanges", testExchangesHooks)
t.Run("Scripts", testScriptsHooks)
t.Run("ScriptExecutions", testScriptExecutionsHooks)
@@ -148,6 +170,10 @@ func TestInsert(t *testing.T) {
t.Run("AuditEvents", testAuditEventsInsertWhitelist)
t.Run("Candles", testCandlesInsert)
t.Run("Candles", testCandlesInsertWhitelist)
t.Run("Datahistoryjobs", testDatahistoryjobsInsert)
t.Run("Datahistoryjobs", testDatahistoryjobsInsertWhitelist)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsInsert)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsInsertWhitelist)
t.Run("Exchanges", testExchangesInsert)
t.Run("Exchanges", testExchangesInsertWhitelist)
t.Run("Scripts", testScriptsInsert)
@@ -168,6 +194,8 @@ func TestInsert(t *testing.T) {
// or deadlocks can occur.
func TestToOne(t *testing.T) {
t.Run("CandleToExchangeUsingExchangeName", testCandleToOneExchangeUsingExchangeName)
t.Run("DatahistoryjobToExchangeUsingExchangeName", testDatahistoryjobToOneExchangeUsingExchangeName)
t.Run("DatahistoryjobresultToDatahistoryjobUsingJob", testDatahistoryjobresultToOneDatahistoryjobUsingJob)
t.Run("ScriptExecutionToScriptUsingScript", testScriptExecutionToOneScriptUsingScript)
t.Run("TradeToExchangeUsingExchangeName", testTradeToOneExchangeUsingExchangeName)
t.Run("WithdrawalCryptoToWithdrawalHistoryUsingWithdrawalHistory", testWithdrawalCryptoToOneWithdrawalHistoryUsingWithdrawalHistory)
@@ -185,6 +213,8 @@ func TestOneToOne(t *testing.T) {
// TestToMany tests cannot be run in parallel
// or deadlocks can occur.
func TestToMany(t *testing.T) {
t.Run("DatahistoryjobToJobDatahistoryjobresults", testDatahistoryjobToManyJobDatahistoryjobresults)
t.Run("ExchangeToExchangeNameDatahistoryjobs", testExchangeToManyExchangeNameDatahistoryjobs)
t.Run("ExchangeToExchangeNameWithdrawalHistories", testExchangeToManyExchangeNameWithdrawalHistories)
t.Run("ScriptToScriptExecutions", testScriptToManyScriptExecutions)
t.Run("WithdrawalHistoryToWithdrawalCryptos", testWithdrawalHistoryToManyWithdrawalCryptos)
@@ -195,6 +225,8 @@ func TestToMany(t *testing.T) {
// or deadlocks can occur.
func TestToOneSet(t *testing.T) {
t.Run("CandleToExchangeUsingExchangeNameCandle", testCandleToOneSetOpExchangeUsingExchangeName)
t.Run("DatahistoryjobToExchangeUsingExchangeNameDatahistoryjobs", testDatahistoryjobToOneSetOpExchangeUsingExchangeName)
t.Run("DatahistoryjobresultToDatahistoryjobUsingJobDatahistoryjobresults", testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob)
t.Run("ScriptExecutionToScriptUsingScriptExecutions", testScriptExecutionToOneSetOpScriptUsingScript)
t.Run("TradeToExchangeUsingExchangeNameTrade", testTradeToOneSetOpExchangeUsingExchangeName)
t.Run("WithdrawalCryptoToWithdrawalHistoryUsingWithdrawalCryptos", testWithdrawalCryptoToOneSetOpWithdrawalHistoryUsingWithdrawalHistory)
@@ -220,6 +252,8 @@ func TestOneToOneRemove(t *testing.T) {}
// TestToManyAdd tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyAdd(t *testing.T) {
t.Run("DatahistoryjobToJobDatahistoryjobresults", testDatahistoryjobToManyAddOpJobDatahistoryjobresults)
t.Run("ExchangeToExchangeNameDatahistoryjobs", testExchangeToManyAddOpExchangeNameDatahistoryjobs)
t.Run("ExchangeToExchangeNameWithdrawalHistories", testExchangeToManyAddOpExchangeNameWithdrawalHistories)
t.Run("ScriptToScriptExecutions", testScriptToManyAddOpScriptExecutions)
t.Run("WithdrawalHistoryToWithdrawalCryptos", testWithdrawalHistoryToManyAddOpWithdrawalCryptos)
@@ -237,6 +271,8 @@ func TestToManyRemove(t *testing.T) {}
func TestReload(t *testing.T) {
t.Run("AuditEvents", testAuditEventsReload)
t.Run("Candles", testCandlesReload)
t.Run("Datahistoryjobs", testDatahistoryjobsReload)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsReload)
t.Run("Exchanges", testExchangesReload)
t.Run("Scripts", testScriptsReload)
t.Run("ScriptExecutions", testScriptExecutionsReload)
@@ -249,6 +285,8 @@ func TestReload(t *testing.T) {
func TestReloadAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsReloadAll)
t.Run("Candles", testCandlesReloadAll)
t.Run("Datahistoryjobs", testDatahistoryjobsReloadAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsReloadAll)
t.Run("Exchanges", testExchangesReloadAll)
t.Run("Scripts", testScriptsReloadAll)
t.Run("ScriptExecutions", testScriptExecutionsReloadAll)
@@ -261,6 +299,8 @@ func TestReloadAll(t *testing.T) {
func TestSelect(t *testing.T) {
t.Run("AuditEvents", testAuditEventsSelect)
t.Run("Candles", testCandlesSelect)
t.Run("Datahistoryjobs", testDatahistoryjobsSelect)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsSelect)
t.Run("Exchanges", testExchangesSelect)
t.Run("Scripts", testScriptsSelect)
t.Run("ScriptExecutions", testScriptExecutionsSelect)
@@ -273,6 +313,8 @@ func TestSelect(t *testing.T) {
func TestUpdate(t *testing.T) {
t.Run("AuditEvents", testAuditEventsUpdate)
t.Run("Candles", testCandlesUpdate)
t.Run("Datahistoryjobs", testDatahistoryjobsUpdate)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsUpdate)
t.Run("Exchanges", testExchangesUpdate)
t.Run("Scripts", testScriptsUpdate)
t.Run("ScriptExecutions", testScriptExecutionsUpdate)
@@ -285,6 +327,8 @@ func TestUpdate(t *testing.T) {
func TestSliceUpdateAll(t *testing.T) {
t.Run("AuditEvents", testAuditEventsSliceUpdateAll)
t.Run("Candles", testCandlesSliceUpdateAll)
t.Run("Datahistoryjobs", testDatahistoryjobsSliceUpdateAll)
t.Run("Datahistoryjobresults", testDatahistoryjobresultsSliceUpdateAll)
t.Run("Exchanges", testExchangesSliceUpdateAll)
t.Run("Scripts", testScriptsSliceUpdateAll)
t.Run("ScriptExecutions", testScriptExecutionsSliceUpdateAll)

View File

@@ -4,25 +4,29 @@
package sqlite3
var TableNames = struct {
AuditEvent string
Candle string
Exchange string
GooseDBVersion string
Script string
ScriptExecution string
Trade string
WithdrawalCrypto string
WithdrawalFiat string
WithdrawalHistory string
AuditEvent string
Candle string
Datahistoryjob string
Datahistoryjobresult string
Exchange string
GooseDBVersion string
Script string
ScriptExecution string
Trade string
WithdrawalCrypto string
WithdrawalFiat string
WithdrawalHistory string
}{
AuditEvent: "audit_event",
Candle: "candle",
Exchange: "exchange",
GooseDBVersion: "goose_db_version",
Script: "script",
ScriptExecution: "script_execution",
Trade: "trade",
WithdrawalCrypto: "withdrawal_crypto",
WithdrawalFiat: "withdrawal_fiat",
WithdrawalHistory: "withdrawal_history",
AuditEvent: "audit_event",
Candle: "candle",
Datahistoryjob: "datahistoryjob",
Datahistoryjobresult: "datahistoryjobresult",
Exchange: "exchange",
GooseDBVersion: "goose_db_version",
Script: "script",
ScriptExecution: "script_execution",
Trade: "trade",
WithdrawalCrypto: "withdrawal_crypto",
WithdrawalFiat: "withdrawal_fiat",
WithdrawalHistory: "withdrawal_history",
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,946 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
// testDatahistoryjobs checks that the generated query factory produces a
// usable, non-nil query object.
func testDatahistoryjobs(t *testing.T) {
	t.Parallel()

	if q := Datahistoryjobs(); q.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
func testDatahistoryjobsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := Datahistoryjobs().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
func testDatahistoryjobsExists(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
e, err := DatahistoryjobExists(ctx, tx, o.ID)
if err != nil {
t.Errorf("Unable to check if Datahistoryjob exists: %s", err)
}
if !e {
t.Errorf("Expected DatahistoryjobExists to return true, but got false.")
}
}
func testDatahistoryjobsFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
datahistoryjobFound, err := FindDatahistoryjob(ctx, tx, o.ID)
if err != nil {
t.Error(err)
}
if datahistoryjobFound == nil {
t.Error("want a record, got nil")
}
}
func testDatahistoryjobsBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = Datahistoryjobs().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
func testDatahistoryjobsOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := Datahistoryjobs().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
// testDatahistoryjobsAll inserts two randomized Datahistoryjob rows and checks
// the query-level All returns both of them.
func testDatahistoryjobsAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	first := &Datahistoryjob{}
	second := &Datahistoryjob{}
	for _, job := range []*Datahistoryjob{first, second} {
		if err := randomize.Struct(seed, job, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
		}
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	for _, job := range []*Datahistoryjob{first, second} {
		if err := job.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}

	slice, err := Datahistoryjobs().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
// testDatahistoryjobsCount inserts two randomized Datahistoryjob rows and
// checks the query-level Count reports exactly two.
func testDatahistoryjobsCount(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	first := &Datahistoryjob{}
	second := &Datahistoryjob{}
	for _, job := range []*Datahistoryjob{first, second} {
		if err := randomize.Struct(seed, job, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
			t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
		}
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	for _, job := range []*Datahistoryjob{first, second} {
		if err := job.Insert(ctx, tx, boil.Infer()); err != nil {
			t.Error(err)
		}
	}

	count, err := Datahistoryjobs().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}
	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
// The datahistoryjob*Hook functions below are sentinel hooks used only by
// testDatahistoryjobsHooks. Each one zeroes the object it receives so the
// test can detect — via a DeepEqual against an empty struct — that the hook
// actually fired at its lifecycle stage. They always return nil.
func datahistoryjobBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}

func datahistoryjobAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjob) error {
	*o = Datahistoryjob{}
	return nil
}
// testDatahistoryjobsHooks registers each sentinel hook in turn, invokes the
// corresponding do*Hooks method, and checks the hook emptied the object.
// After each stage it resets the package-level hook slice so stages do not
// interfere. Because it mutates package-level hook registries, the sequence
// and resets below are order-sensitive.
func testDatahistoryjobsHooks(t *testing.T) {
	t.Parallel()
	var err error
	ctx := context.Background()
	empty := &Datahistoryjob{}
	o := &Datahistoryjob{}
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, datahistoryjobDBTypes, false); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob object: %s", err)
	}
	// BeforeInsert
	AddDatahistoryjobHook(boil.BeforeInsertHook, datahistoryjobBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobBeforeInsertHooks = []DatahistoryjobHook{}
	// AfterInsert
	AddDatahistoryjobHook(boil.AfterInsertHook, datahistoryjobAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterInsertHooks = []DatahistoryjobHook{}
	// AfterSelect
	AddDatahistoryjobHook(boil.AfterSelectHook, datahistoryjobAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterSelectHooks = []DatahistoryjobHook{}
	// BeforeUpdate
	AddDatahistoryjobHook(boil.BeforeUpdateHook, datahistoryjobBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobBeforeUpdateHooks = []DatahistoryjobHook{}
	// AfterUpdate
	AddDatahistoryjobHook(boil.AfterUpdateHook, datahistoryjobAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterUpdateHooks = []DatahistoryjobHook{}
	// BeforeDelete
	AddDatahistoryjobHook(boil.BeforeDeleteHook, datahistoryjobBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobBeforeDeleteHooks = []DatahistoryjobHook{}
	// AfterDelete
	AddDatahistoryjobHook(boil.AfterDeleteHook, datahistoryjobAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterDeleteHooks = []DatahistoryjobHook{}
	// BeforeUpsert
	AddDatahistoryjobHook(boil.BeforeUpsertHook, datahistoryjobBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobBeforeUpsertHooks = []DatahistoryjobHook{}
	// AfterUpsert
	AddDatahistoryjobHook(boil.AfterUpsertHook, datahistoryjobAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	datahistoryjobAfterUpsertHooks = []DatahistoryjobHook{}
}
func testDatahistoryjobsInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
func testDatahistoryjobsInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(datahistoryjobColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
// testDatahistoryjobToManyJobDatahistoryjobresults verifies the to-many
// relationship from a Datahistoryjob to its Datahistoryjobresult rows: the
// relationship query returns both children, and eager loading (both the
// slice-based and singular LoadJobDatahistoryjobresults paths) populates
// exactly two records.
func testDatahistoryjobToManyJobDatahistoryjobresults(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Roll back unconditionally so test rows never persist.
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjob
	var b, c Datahistoryjobresult
	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}
	// Point both children at the parent before inserting them.
	b.JobID = a.ID
	c.JobID = a.ID
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	check, err := a.JobDatahistoryjobresults().All(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}
	// Match on JobID: both inserted children must come back.
	bFound, cFound := false, false
	for _, v := range check {
		if v.JobID == b.JobID {
			bFound = true
		}
		if v.JobID == c.JobID {
			cFound = true
		}
	}
	if !bFound {
		t.Error("expected to find b")
	}
	if !cFound {
		t.Error("expected to find c")
	}
	// Eager load via the slice path (singular=false).
	slice := DatahistoryjobSlice{&a}
	if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.JobDatahistoryjobresults); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}
	// Reset and eager load again via the singular path (singular=true).
	a.R.JobDatahistoryjobresults = nil
	if err = a.L.LoadJobDatahistoryjobresults(ctx, tx, true, &a, nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.JobDatahistoryjobresults); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}
	if t.Failed() {
		t.Logf("%#v", check)
	}
}
// testDatahistoryjobToManyAddOpJobDatahistoryjobresults exercises the
// generated AddJobDatahistoryjobresults helper: it attaches four
// Datahistoryjobresult rows to one Datahistoryjob in two batches — the first
// pre-inserted, the second inserted by the Add call itself — verifying the
// foreign keys, both sides of the relationship structs, and the linked-row
// count after each batch.
func testDatahistoryjobToManyAddOpJobDatahistoryjobresults(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Roll back unconditionally so test rows never persist.
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjob
	var b, c, d, e Datahistoryjobresult
	seed := randomize.NewSeed()
	// Randomize everything except primary keys so inserts do not collide.
	if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	foreigners := []*Datahistoryjobresult{&b, &c, &d, &e}
	for _, x := range foreigners {
		if err = randomize.Struct(seed, x, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
			t.Fatal(err)
		}
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// b and c are inserted up front; d and e are left for the Add call below
	// (its insert flag is true on the second batch).
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	foreignersSplitByInsertion := [][]*Datahistoryjobresult{
		{&b, &c},
		{&d, &e},
	}
	for i, x := range foreignersSplitByInsertion {
		// i != 0 asks Add to insert the batch before linking it.
		err = a.AddJobDatahistoryjobresults(ctx, tx, i != 0, x...)
		if err != nil {
			t.Fatal(err)
		}
		first := x[0]
		second := x[1]
		// Foreign keys on the child rows must now point at the parent.
		if a.ID != first.JobID {
			t.Error("foreign key was wrong value", a.ID, first.JobID)
		}
		if a.ID != second.JobID {
			t.Error("foreign key was wrong value", a.ID, second.JobID)
		}
		// Child-side relationship structs must reference the exact parent.
		if first.R.Job != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}
		if second.R.Job != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}
		// Parent-side slice grows two entries per batch, in order.
		if a.R.JobDatahistoryjobresults[i*2] != first {
			t.Error("relationship struct slice not set to correct value")
		}
		if a.R.JobDatahistoryjobresults[i*2+1] != second {
			t.Error("relationship struct slice not set to correct value")
		}
		count, err := a.JobDatahistoryjobresults().Count(ctx, tx)
		if err != nil {
			t.Fatal(err)
		}
		// After batch i the database should hold 2*(i+1) linked rows.
		if want := int64((i + 1) * 2); count != want {
			t.Error("want", want, "got", count)
		}
	}
}
// testDatahistoryjobToOneExchangeUsingExchangeName verifies the to-one
// relationship from Datahistoryjob to its parent Exchange: the relationship
// query resolves to the inserted foreign row, and eager loading works through
// both the slice-based and singular LoadExchangeName paths.
func testDatahistoryjobToOneExchangeUsingExchangeName(t *testing.T) {
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Roll back unconditionally so test rows never persist.
	defer func() { _ = tx.Rollback() }()
	var local Datahistoryjob
	var foreign Exchange
	seed := randomize.NewSeed()
	if err := randomize.Struct(seed, &local, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
	}
	if err := randomize.Struct(seed, &foreign, exchangeDBTypes, false, exchangeColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Exchange struct: %s", err)
	}
	// Insert the parent first so the child's FK is valid.
	if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	local.ExchangeNameID = foreign.ID
	if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	check, err := local.ExchangeName().One(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}
	if check.ID != foreign.ID {
		t.Errorf("want: %v, got %v", foreign.ID, check.ID)
	}
	// Eager load via the slice path (singular=false).
	slice := DatahistoryjobSlice{&local}
	if err = local.L.LoadExchangeName(ctx, tx, false, (*[]*Datahistoryjob)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if local.R.ExchangeName == nil {
		t.Error("struct should have been eager loaded")
	}
	// Reset and eager load again via the singular path (singular=true).
	local.R.ExchangeName = nil
	if err = local.L.LoadExchangeName(ctx, tx, true, &local, nil); err != nil {
		t.Fatal(err)
	}
	if local.R.ExchangeName == nil {
		t.Error("struct should have been eager loaded")
	}
}
// testDatahistoryjobToOneSetOpExchangeUsingExchangeName exercises the
// generated SetExchangeName helper twice — first against a pre-inserted
// Exchange, then against one the setter must insert itself — checking the
// relationship structs, the in-memory foreign key, and that the FK survives a
// zero-out-and-Reload round trip to the database.
func testDatahistoryjobToOneSetOpExchangeUsingExchangeName(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Roll back unconditionally so test rows never persist.
	defer func() { _ = tx.Rollback() }()
	var a Datahistoryjob
	var b, c Exchange
	seed := randomize.NewSeed()
	// Randomize everything except primary keys so inserts do not collide.
	if err = randomize.Struct(seed, &a, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &b, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// b is pre-inserted; c is inserted by SetExchangeName on the second pass
	// (the insert flag is i != 0).
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	for i, x := range []*Exchange{&b, &c} {
		err = a.SetExchangeName(ctx, tx, i != 0, x)
		if err != nil {
			t.Fatal(err)
		}
		if a.R.ExchangeName != x {
			t.Error("relationship struct not set to correct value")
		}
		if x.R.ExchangeNameDatahistoryjobs[0] != &a {
			t.Error("failed to append to foreign relationship struct")
		}
		if a.ExchangeNameID != x.ID {
			t.Error("foreign key was wrong value", a.ExchangeNameID)
		}
		// Zero the in-memory FK via reflection, then Reload from the DB to
		// prove the setter actually persisted the new value.
		zero := reflect.Zero(reflect.TypeOf(a.ExchangeNameID))
		reflect.Indirect(reflect.ValueOf(&a.ExchangeNameID)).Set(zero)
		if err = a.Reload(ctx, tx); err != nil {
			t.Fatal("failed to reload", err)
		}
		if a.ExchangeNameID != x.ID {
			t.Error("foreign key was wrong value", a.ExchangeNameID, x.ID)
		}
	}
}
func testDatahistoryjobsReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
func testDatahistoryjobsReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
func testDatahistoryjobsSelect(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobs().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 1 {
t.Error("want one record, got:", len(slice))
}
}
var (
// datahistoryjobDBTypes maps struct field names to their SQLite column
// types; randomize.Struct uses it to generate type-appropriate values.
datahistoryjobDBTypes = map[string]string{`ID`: `TEXT`, `Nickname`: `TEXT`, `ExchangeNameID`: `TEXT`, `Asset`: `TEXT`, `Base`: `TEXT`, `Quote`: `TEXT`, `StartTime`: `TIMESTAMP`, `EndTime`: `TIMESTAMP`, `Interval`: `REAL`, `DataType`: `REAL`, `RequestSize`: `REAL`, `MaxRetries`: `REAL`, `BatchCount`: `REAL`, `Status`: `REAL`, `Created`: `TIMESTAMP`}
// Force bytes dependency for generated code paths that may not use it.
_ = bytes.MinRead
)
// testDatahistoryjobsUpdate inserts a randomized job, re-randomizes its
// non-key columns, and verifies Update affects exactly one row.
func testDatahistoryjobsUpdate(t *testing.T) {
t.Parallel()
if len(datahistoryjobPrimaryKeyColumns) == 0 {
t.Skip("Skipping table with no primary key columns")
}
if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
job := &Datahistoryjob{}
if err := randomize.Struct(seed, job, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err := job.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Blacklist the primary key so only non-key columns change.
if err := randomize.Struct(seed, job, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
rowsAff, err := job.Update(ctx, tx, boil.Infer())
if err != nil {
t.Error(err)
}
if rowsAff != 1 {
t.Error("should only affect one row but affected", rowsAff)
}
}
// testDatahistoryjobsSliceUpdateAll inserts one randomized row, builds a
// column -> value update map via reflection over the struct's `boil` tags,
// and verifies slice.UpdateAll reports exactly one affected row.
func testDatahistoryjobsSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(datahistoryjobAllColumns) == len(datahistoryjobPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjob{}
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobs().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Re-randomize everything except the primary key so the update writes
// fresh values.
if err = randomize.Struct(seed, o, datahistoryjobDBTypes, true, datahistoryjobPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(datahistoryjobAllColumns, datahistoryjobPrimaryKeyColumns) {
fields = datahistoryjobAllColumns
} else {
fields = strmangle.SetComplement(
datahistoryjobAllColumns,
datahistoryjobPrimaryKeyColumns,
)
}
// Map each updatable column to its randomized struct value by matching
// the `boil` struct tag against the column name.
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := DatahistoryjobSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}

// NOTE: scrape artifact removed — the content below this point is a separate
// generated file (datahistoryjobresult model for sqlite3), concatenated here.
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"context"
"database/sql"
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/queries/qm"
"github.com/thrasher-corp/sqlboiler/queries/qmhelper"
"github.com/thrasher-corp/sqlboiler/strmangle"
"github.com/volatiletech/null"
)
// Datahistoryjobresult is an object representing the database table.
type Datahistoryjobresult struct {
ID string `boil:"id" json:"id" toml:"id" yaml:"id"`
JobID string `boil:"job_id" json:"job_id" toml:"job_id" yaml:"job_id"`
Result null.String `boil:"result" json:"result,omitempty" toml:"result" yaml:"result,omitempty"`
Status float64 `boil:"status" json:"status" toml:"status" yaml:"status"`
IntervalStartTime string `boil:"interval_start_time" json:"interval_start_time" toml:"interval_start_time" yaml:"interval_start_time"`
IntervalEndTime string `boil:"interval_end_time" json:"interval_end_time" toml:"interval_end_time" yaml:"interval_end_time"`
RunTime string `boil:"run_time" json:"run_time" toml:"run_time" yaml:"run_time"`
// R holds eager-loaded relationships; L exposes the Load* methods.
// Both are excluded from serialization via the "-" tags.
R *datahistoryjobresultR `boil:"-" json:"-" toml:"-" yaml:"-"`
L datahistoryjobresultL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
// DatahistoryjobresultColumns maps struct field names to their database
// column names.
var DatahistoryjobresultColumns = struct {
ID string
JobID string
Result string
Status string
IntervalStartTime string
IntervalEndTime string
RunTime string
}{
ID: "id",
JobID: "job_id",
Result: "result",
Status: "status",
IntervalStartTime: "interval_start_time",
IntervalEndTime: "interval_end_time",
RunTime: "run_time",
}
// Generated where
// whereHelpernull_String builds typed where-clause query mods for nullable
// string columns. EQ/NEQ use null-aware comparison; the ordered operators
// (LT/LTE/GT/GTE) use plain comparison.
type whereHelpernull_String struct{ field string }
func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, false, x)
}
func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod {
return qmhelper.WhereNullEQ(w.field, true, x)
}
func (w whereHelpernull_String) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }
func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }
func (w whereHelpernull_String) LT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LT, x)
}
func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.LTE, x)
}
func (w whereHelpernull_String) GT(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GT, x)
}
func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod {
return qmhelper.Where(w.field, qmhelper.GTE, x)
}
// DatahistoryjobresultWhere exposes typed where-clause builders, one per
// column, pre-qualified with the table name.
var DatahistoryjobresultWhere = struct {
ID whereHelperstring
JobID whereHelperstring
Result whereHelpernull_String
Status whereHelperfloat64
IntervalStartTime whereHelperstring
IntervalEndTime whereHelperstring
RunTime whereHelperstring
}{
ID: whereHelperstring{field: "\"datahistoryjobresult\".\"id\""},
JobID: whereHelperstring{field: "\"datahistoryjobresult\".\"job_id\""},
Result: whereHelpernull_String{field: "\"datahistoryjobresult\".\"result\""},
Status: whereHelperfloat64{field: "\"datahistoryjobresult\".\"status\""},
IntervalStartTime: whereHelperstring{field: "\"datahistoryjobresult\".\"interval_start_time\""},
IntervalEndTime: whereHelperstring{field: "\"datahistoryjobresult\".\"interval_end_time\""},
RunTime: whereHelperstring{field: "\"datahistoryjobresult\".\"run_time\""},
}
// DatahistoryjobresultRels is where relationship names are stored.
var DatahistoryjobresultRels = struct {
Job string
}{
Job: "Job",
}
// datahistoryjobresultR is where relationships are stored.
type datahistoryjobresultR struct {
Job *Datahistoryjob // parent job, populated by LoadJob/SetJob
}
// NewStruct creates a new relationship struct
func (*datahistoryjobresultR) NewStruct() *datahistoryjobresultR {
return &datahistoryjobresultR{}
}
// datahistoryjobresultL is where Load methods for each relationship are stored.
type datahistoryjobresultL struct{}
var (
datahistoryjobresultAllColumns = []string{"id", "job_id", "result", "status", "interval_start_time", "interval_end_time", "run_time"}
datahistoryjobresultColumnsWithoutDefault = []string{"id", "job_id", "result", "status", "interval_start_time", "interval_end_time"}
// run_time is the only column with a database-side default value.
datahistoryjobresultColumnsWithDefault = []string{"run_time"}
datahistoryjobresultPrimaryKeyColumns = []string{"id"}
)
type (
// DatahistoryjobresultSlice is an alias for a slice of pointers to Datahistoryjobresult.
// This should generally be used opposed to []Datahistoryjobresult.
DatahistoryjobresultSlice []*Datahistoryjobresult
// DatahistoryjobresultHook is the signature for custom Datahistoryjobresult hook methods
DatahistoryjobresultHook func(context.Context, boil.ContextExecutor, *Datahistoryjobresult) error
// datahistoryjobresultQuery wraps queries.Query so the finisher methods
// (One, All, Count, Exists, UpdateAll, DeleteAll) can hang off it.
datahistoryjobresultQuery struct {
*queries.Query
}
)
// Cache for insert, update and upsert
var (
datahistoryjobresultType = reflect.TypeOf(&Datahistoryjobresult{})
datahistoryjobresultMapping = queries.MakeStructMapping(datahistoryjobresultType)
datahistoryjobresultPrimaryKeyMapping, _ = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, datahistoryjobresultPrimaryKeyColumns)
datahistoryjobresultInsertCacheMut sync.RWMutex
datahistoryjobresultInsertCache = make(map[string]insertCache)
datahistoryjobresultUpdateCacheMut sync.RWMutex
datahistoryjobresultUpdateCache = make(map[string]updateCache)
datahistoryjobresultUpsertCacheMut sync.RWMutex
datahistoryjobresultUpsertCache = make(map[string]insertCache)
)
var (
// Force time package dependency for automated UpdatedAt/CreatedAt.
_ = time.Second
// Force qmhelper dependency for where clause generation (which doesn't
// always happen)
_ = qmhelper.Where
)
// Registered hook functions, executed in registration order by the
// do*Hooks helpers below.
var datahistoryjobresultBeforeInsertHooks []DatahistoryjobresultHook
var datahistoryjobresultBeforeUpdateHooks []DatahistoryjobresultHook
var datahistoryjobresultBeforeDeleteHooks []DatahistoryjobresultHook
var datahistoryjobresultBeforeUpsertHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterInsertHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterSelectHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterUpdateHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterDeleteHooks []DatahistoryjobresultHook
var datahistoryjobresultAfterUpsertHooks []DatahistoryjobresultHook
// doBeforeInsertHooks executes all "before insert" hooks.
// All nine do*Hooks helpers below share the same shape: they no-op when
// hooks are skipped via the context (boil.HooksAreSkipped), otherwise they
// run each registered hook in order and stop at the first error.
func (o *Datahistoryjobresult) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultBeforeInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *Datahistoryjobresult) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultBeforeUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *Datahistoryjobresult) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultBeforeDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *Datahistoryjobresult) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultBeforeUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterInsertHooks executes all "after Insert" hooks.
func (o *Datahistoryjobresult) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultAfterInsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterSelectHooks executes all "after Select" hooks.
func (o *Datahistoryjobresult) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultAfterSelectHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpdateHooks executes all "after Update" hooks.
func (o *Datahistoryjobresult) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultAfterUpdateHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *Datahistoryjobresult) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultAfterDeleteHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *Datahistoryjobresult) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
if boil.HooksAreSkipped(ctx) {
return nil
}
for _, hook := range datahistoryjobresultAfterUpsertHooks {
if err := hook(ctx, exec, o); err != nil {
return err
}
}
return nil
}
// AddDatahistoryjobresultHook registers your hook function for all future operations.
func AddDatahistoryjobresultHook(hookPoint boil.HookPoint, hook DatahistoryjobresultHook) {
switch hookPoint {
case boil.BeforeInsertHook:
datahistoryjobresultBeforeInsertHooks = append(datahistoryjobresultBeforeInsertHooks, hook)
case boil.BeforeUpdateHook:
datahistoryjobresultBeforeUpdateHooks = append(datahistoryjobresultBeforeUpdateHooks, hook)
case boil.BeforeDeleteHook:
datahistoryjobresultBeforeDeleteHooks = append(datahistoryjobresultBeforeDeleteHooks, hook)
case boil.BeforeUpsertHook:
datahistoryjobresultBeforeUpsertHooks = append(datahistoryjobresultBeforeUpsertHooks, hook)
case boil.AfterInsertHook:
datahistoryjobresultAfterInsertHooks = append(datahistoryjobresultAfterInsertHooks, hook)
case boil.AfterSelectHook:
datahistoryjobresultAfterSelectHooks = append(datahistoryjobresultAfterSelectHooks, hook)
case boil.AfterUpdateHook:
datahistoryjobresultAfterUpdateHooks = append(datahistoryjobresultAfterUpdateHooks, hook)
case boil.AfterDeleteHook:
datahistoryjobresultAfterDeleteHooks = append(datahistoryjobresultAfterDeleteHooks, hook)
case boil.AfterUpsertHook:
datahistoryjobresultAfterUpsertHooks = append(datahistoryjobresultAfterUpsertHooks, hook)
}
}
// One returns a single datahistoryjobresult record from the query.
// A miss is returned as a bare sql.ErrNoRows so callers can compare for it
// directly; other failures are wrapped with context.
func (q datahistoryjobresultQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Datahistoryjobresult, error) {
o := &Datahistoryjobresult{}
queries.SetLimit(q.Query, 1)
err := q.Bind(ctx, exec, o)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows {
return nil, sql.ErrNoRows
}
return nil, errors.Wrap(err, "sqlite3: failed to execute a one query for datahistoryjobresult")
}
if err := o.doAfterSelectHooks(ctx, exec); err != nil {
return o, err
}
return o, nil
}
// All returns all Datahistoryjobresult records from the query.
// After-select hooks run once per bound row; the hook-slice length check
// avoids the loop entirely when no hooks are registered.
func (q datahistoryjobresultQuery) All(ctx context.Context, exec boil.ContextExecutor) (DatahistoryjobresultSlice, error) {
var o []*Datahistoryjobresult
err := q.Bind(ctx, exec, &o)
if err != nil {
return nil, errors.Wrap(err, "sqlite3: failed to assign all query results to Datahistoryjobresult slice")
}
if len(datahistoryjobresultAfterSelectHooks) != 0 {
for _, obj := range o {
if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
return o, err
}
}
}
return o, nil
}
// Count returns the count of all Datahistoryjobresult records in the query.
func (q datahistoryjobresultQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
var total int64
// Replace any select clause with COUNT(*) before executing.
queries.SetSelect(q.Query, nil)
queries.SetCount(q.Query)
if err := q.Query.QueryRowContext(ctx, exec).Scan(&total); err != nil {
return 0, errors.Wrap(err, "sqlite3: failed to count datahistoryjobresult rows")
}
return total, nil
}
// Exists checks if the row exists in the table.
func (q datahistoryjobresultQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
var total int64
// Turn the query into SELECT COUNT(*) ... LIMIT 1 and test for a match.
queries.SetSelect(q.Query, nil)
queries.SetCount(q.Query)
queries.SetLimit(q.Query, 1)
if err := q.Query.QueryRowContext(ctx, exec).Scan(&total); err != nil {
return false, errors.Wrap(err, "sqlite3: failed to check if datahistoryjobresult exists")
}
return total > 0, nil
}
// Job pointed to by the foreign key.
func (o *Datahistoryjobresult) Job(mods ...qm.QueryMod) datahistoryjobQuery {
qms := append([]qm.QueryMod{qm.Where("\"id\" = ?", o.JobID)}, mods...)
q := Datahistoryjobs(qms...)
// Re-point the generated query at the parent table.
queries.SetFrom(q.Query, "\"datahistoryjob\"")
return q
}
// LoadJob allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for an N-1 relationship.
func (datahistoryjobresultL) LoadJob(ctx context.Context, e boil.ContextExecutor, singular bool, maybeDatahistoryjobresult interface{}, mods queries.Applicator) error {
var slice []*Datahistoryjobresult
var object *Datahistoryjobresult
// maybeDatahistoryjobresult is either a single object or a slice of
// objects, selected by the singular flag.
if singular {
object = maybeDatahistoryjobresult.(*Datahistoryjobresult)
} else {
slice = *maybeDatahistoryjobresult.(*[]*Datahistoryjobresult)
}
args := make([]interface{}, 0, 1)
if singular {
if object.R == nil {
object.R = &datahistoryjobresultR{}
}
args = append(args, object.JobID)
} else {
// Collect distinct JobIDs; the inner loop deduplicates so the IN
// clause contains each parent id only once.
Outer:
for _, obj := range slice {
if obj.R == nil {
obj.R = &datahistoryjobresultR{}
}
for _, a := range args {
if a == obj.JobID {
continue Outer
}
}
args = append(args, obj.JobID)
}
}
if len(args) == 0 {
return nil
}
query := NewQuery(qm.From(`datahistoryjob`), qm.WhereIn(`datahistoryjob.id in ?`, args...))
if mods != nil {
mods.Apply(query)
}
results, err := query.QueryContext(ctx, e)
if err != nil {
return errors.Wrap(err, "failed to eager load Datahistoryjob")
}
var resultSlice []*Datahistoryjob
if err = queries.Bind(results, &resultSlice); err != nil {
return errors.Wrap(err, "failed to bind eager loaded slice Datahistoryjob")
}
if err = results.Close(); err != nil {
return errors.Wrap(err, "failed to close results of eager load for datahistoryjob")
}
if err = results.Err(); err != nil {
return errors.Wrap(err, "error occurred during iteration of eager loaded relations for datahistoryjob")
}
if len(datahistoryjobresultAfterSelectHooks) != 0 {
for _, obj := range resultSlice {
if err := obj.doAfterSelectHooks(ctx, e); err != nil {
return err
}
}
}
if len(resultSlice) == 0 {
return nil
}
// Wire both directions of the relationship in memory: child.R.Job and
// parent.R.JobDatahistoryjobresults.
if singular {
foreign := resultSlice[0]
object.R.Job = foreign
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.JobDatahistoryjobresults = append(foreign.R.JobDatahistoryjobresults, object)
return nil
}
for _, local := range slice {
for _, foreign := range resultSlice {
if local.JobID == foreign.ID {
local.R.Job = foreign
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.JobDatahistoryjobresults = append(foreign.R.JobDatahistoryjobresults, local)
break
}
}
}
return nil
}
// SetJob of the datahistoryjobresult to the related item.
// Sets o.R.Job to related.
// Adds o to related.R.JobDatahistoryjobresults.
// When insert is true the related job is inserted first.
func (o *Datahistoryjobresult) SetJob(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Datahistoryjob) error {
var err error
if insert {
if err = related.Insert(ctx, exec, boil.Infer()); err != nil {
return errors.Wrap(err, "failed to insert into foreign table")
}
}
// Persist the new foreign key on this row first ...
updateQuery := fmt.Sprintf(
"UPDATE \"datahistoryjobresult\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 0, []string{"job_id"}),
strmangle.WhereClause("\"", "\"", 0, datahistoryjobresultPrimaryKeyColumns),
)
values := []interface{}{related.ID, o.ID}
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, updateQuery)
fmt.Fprintln(boil.DebugWriter, values)
}
if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
return errors.Wrap(err, "failed to update local table")
}
// ... then mirror the change on the in-memory structs, both directions.
o.JobID = related.ID
if o.R == nil {
o.R = &datahistoryjobresultR{
Job: related,
}
} else {
o.R.Job = related
}
if related.R == nil {
related.R = &datahistoryjobR{
JobDatahistoryjobresults: DatahistoryjobresultSlice{o},
}
} else {
related.R.JobDatahistoryjobresults = append(related.R.JobDatahistoryjobresults, o)
}
return nil
}
// Datahistoryjobresults retrieves all the records using an executor.
func Datahistoryjobresults(mods ...qm.QueryMod) datahistoryjobresultQuery {
allMods := append(mods, qm.From("\"datahistoryjobresult\""))
return datahistoryjobresultQuery{NewQuery(allMods...)}
}
// FindDatahistoryjobresult retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
// A miss is returned as a bare sql.ErrNoRows for direct comparison.
func FindDatahistoryjobresult(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*Datahistoryjobresult, error) {
datahistoryjobresultObj := &Datahistoryjobresult{}
sel := "*"
if len(selectCols) > 0 {
// Quote each requested column with the dialect's identifier quotes.
sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
}
query := fmt.Sprintf(
"select %s from \"datahistoryjobresult\" where \"id\"=?", sel,
)
q := queries.Raw(query, iD)
err := q.Bind(ctx, exec, datahistoryjobresultObj)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows {
return nil, sql.ErrNoRows
}
return nil, errors.Wrap(err, "sqlite3: unable to select from datahistoryjobresult")
}
return datahistoryjobresultObj, nil
}
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Datahistoryjobresult) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
if o == nil {
return errors.New("sqlite3: no datahistoryjobresult provided for insertion")
}
var err error
if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
return err
}
// Columns that have DB defaults but hold non-zero struct values must be
// written explicitly; they also form part of the statement-cache key.
nzDefaults := queries.NonZeroDefaultSet(datahistoryjobresultColumnsWithDefault, o)
key := makeCacheKey(columns, nzDefaults)
datahistoryjobresultInsertCacheMut.RLock()
cache, cached := datahistoryjobresultInsertCache[key]
datahistoryjobresultInsertCacheMut.RUnlock()
if !cached {
// Build and cache the INSERT statement plus the value/return mappings.
wl, returnColumns := columns.InsertColumnSet(
datahistoryjobresultAllColumns,
datahistoryjobresultColumnsWithDefault,
datahistoryjobresultColumnsWithoutDefault,
nzDefaults,
)
cache.valueMapping, err = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, wl)
if err != nil {
return err
}
cache.retMapping, err = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, returnColumns)
if err != nil {
return err
}
if len(wl) != 0 {
cache.query = fmt.Sprintf("INSERT INTO \"datahistoryjobresult\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
} else {
cache.query = "INSERT INTO \"datahistoryjobresult\" () VALUES ()%s%s"
}
var queryOutput, queryReturning string
if len(cache.retMapping) != 0 {
// No RETURNING support here: default-populated columns are read back
// with a follow-up SELECT by primary key.
cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"datahistoryjobresult\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, datahistoryjobresultPrimaryKeyColumns))
}
cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
}
value := reflect.Indirect(reflect.ValueOf(o))
vals := queries.ValuesFromMapping(value, cache.valueMapping)
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, cache.query)
fmt.Fprintln(boil.DebugWriter, vals)
}
_, err = exec.ExecContext(ctx, cache.query, vals...)
if err != nil {
return errors.Wrap(err, "sqlite3: unable to insert into datahistoryjobresult")
}
var identifierCols []interface{}
// Skip the read-back when there are no default columns to refresh.
if len(cache.retMapping) == 0 {
goto CacheNoHooks
}
identifierCols = []interface{}{
o.ID,
}
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, cache.retQuery)
fmt.Fprintln(boil.DebugWriter, identifierCols...)
}
err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
if err != nil {
return errors.Wrap(err, "sqlite3: unable to populate default values for datahistoryjobresult")
}
CacheNoHooks:
if !cached {
datahistoryjobresultInsertCacheMut.Lock()
datahistoryjobresultInsertCache[key] = cache
datahistoryjobresultInsertCacheMut.Unlock()
}
return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the Datahistoryjobresult.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
// Note: updating a non-existent row is not an error — callers must check
// the returned affected-row count.
func (o *Datahistoryjobresult) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
var err error
if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
return 0, err
}
key := makeCacheKey(columns, nil)
datahistoryjobresultUpdateCacheMut.RLock()
cache, cached := datahistoryjobresultUpdateCache[key]
datahistoryjobresultUpdateCacheMut.RUnlock()
if !cached {
// Build and cache the UPDATE statement and its value mapping; the
// primary key columns are appended for the WHERE clause bindings.
wl := columns.UpdateColumnSet(
datahistoryjobresultAllColumns,
datahistoryjobresultPrimaryKeyColumns,
)
if len(wl) == 0 {
return 0, errors.New("sqlite3: unable to update datahistoryjobresult, could not build whitelist")
}
cache.query = fmt.Sprintf("UPDATE \"datahistoryjobresult\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 0, wl),
strmangle.WhereClause("\"", "\"", 0, datahistoryjobresultPrimaryKeyColumns),
)
cache.valueMapping, err = queries.BindMapping(datahistoryjobresultType, datahistoryjobresultMapping, append(wl, datahistoryjobresultPrimaryKeyColumns...))
if err != nil {
return 0, err
}
}
values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, cache.query)
fmt.Fprintln(boil.DebugWriter, values)
}
var result sql.Result
result, err = exec.ExecContext(ctx, cache.query, values...)
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to update datahistoryjobresult row")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by update for datahistoryjobresult")
}
if !cached {
datahistoryjobresultUpdateCacheMut.Lock()
datahistoryjobresultUpdateCache[key] = cache
datahistoryjobresultUpdateCacheMut.Unlock()
}
return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
// No update hooks are executed on this bulk path.
func (q datahistoryjobresultQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
queries.SetUpdate(q.Query, cols)
result, err := q.Query.ExecContext(ctx, exec)
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to update all for datahistoryjobresult")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to retrieve rows affected for datahistoryjobresult")
}
return rowsAff, nil
}
// UpdateAll updates all rows with the specified column values, using an executor.
// The generated statement matches rows by a repeated primary-key WHERE
// clause; the bound args are the SET values followed by each row's key.
func (o DatahistoryjobresultSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
ln := int64(len(o))
if ln == 0 {
return 0, nil
}
if len(cols) == 0 {
return 0, errors.New("sqlite3: update all requires at least one column argument")
}
colNames := make([]string, len(cols))
args := make([]interface{}, len(cols))
i := 0
for name, value := range cols {
colNames[i] = name
args[i] = value
i++
}
// Append all of the primary key values for each column
for _, obj := range o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), datahistoryjobresultPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := fmt.Sprintf("UPDATE \"datahistoryjobresult\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 0, colNames),
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, datahistoryjobresultPrimaryKeyColumns, len(o)))
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
fmt.Fprintln(boil.DebugWriter, args...)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to update all in datahistoryjobresult slice")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to retrieve rows affected all in update all datahistoryjobresult")
}
return rowsAff, nil
}
// Delete deletes a single Datahistoryjobresult record with an executor.
// Delete will match against the primary key column to find the record to delete.
// Before/after delete hooks run around the statement.
func (o *Datahistoryjobresult) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if o == nil {
return 0, errors.New("sqlite3: no Datahistoryjobresult provided for delete")
}
if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
return 0, err
}
args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), datahistoryjobresultPrimaryKeyMapping)
sql := "DELETE FROM \"datahistoryjobresult\" WHERE \"id\"=?"
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
fmt.Fprintln(boil.DebugWriter, args...)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to delete from datahistoryjobresult")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by delete for datahistoryjobresult")
}
if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
return 0, err
}
return rowsAff, nil
}
// DeleteAll deletes all matching rows.
// The receiver's query is converted into a DELETE statement and executed;
// the number of deleted rows is returned. Note: no delete hooks run here,
// since individual records are never materialized.
func (q datahistoryjobresultQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if q.Query == nil {
return 0, errors.New("sqlite3: no datahistoryjobresultQuery provided for delete all")
}
// Rewrite the underlying SELECT query into a DELETE in place.
queries.SetDelete(q.Query)
result, err := q.Query.ExecContext(ctx, exec)
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to delete all from datahistoryjobresult")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by deleteall for datahistoryjobresult")
}
return rowsAff, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
// It runs any registered before/after delete hooks for every element and
// issues a single DELETE matching each element's primary key. Returns the
// number of rows affected.
func (o DatahistoryjobresultSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
if len(o) == 0 {
return 0, nil
}
if len(datahistoryjobresultBeforeDeleteHooks) != 0 {
for _, obj := range o {
if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
return 0, err
}
}
}
// Collect every element's primary key values for the repeated WHERE clause.
var args []interface{}
for _, obj := range o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), datahistoryjobresultPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := "DELETE FROM \"datahistoryjobresult\" WHERE " +
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, datahistoryjobresultPrimaryKeyColumns, len(o))
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
// Spread the arguments for consistency with every other debug printout
// in this file; previously the slice was printed as a single value.
fmt.Fprintln(boil.DebugWriter, args...)
}
result, err := exec.ExecContext(ctx, sql, args...)
if err != nil {
return 0, errors.Wrap(err, "sqlite3: unable to delete all from datahistoryjobresult slice")
}
rowsAff, err := result.RowsAffected()
if err != nil {
return 0, errors.Wrap(err, "sqlite3: failed to get rows affected by deleteall for datahistoryjobresult")
}
if len(datahistoryjobresultAfterDeleteHooks) != 0 {
for _, obj := range o {
if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
return 0, err
}
}
}
return rowsAff, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
// The freshly loaded row replaces the receiver's contents in place.
func (o *Datahistoryjobresult) Reload(ctx context.Context, exec boil.ContextExecutor) error {
ret, err := FindDatahistoryjobresult(ctx, exec, o.ID)
if err != nil {
return err
}
// Overwrite the receiver with the database's current state.
*o = *ret
return nil
}
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
// NOTE(review): the refreshed slice comes back in database order, which may
// differ from the original slice's order.
func (o *DatahistoryjobresultSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
if o == nil || len(*o) == 0 {
return nil
}
slice := DatahistoryjobresultSlice{}
// Gather all primary keys so every row is fetched in one query.
var args []interface{}
for _, obj := range *o {
pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), datahistoryjobresultPrimaryKeyMapping)
args = append(args, pkeyArgs...)
}
sql := "SELECT \"datahistoryjobresult\".* FROM \"datahistoryjobresult\" WHERE " +
strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, datahistoryjobresultPrimaryKeyColumns, len(*o))
q := queries.Raw(sql, args...)
err := q.Bind(ctx, exec, &slice)
if err != nil {
return errors.Wrap(err, "sqlite3: unable to reload all in DatahistoryjobresultSlice")
}
*o = slice
return nil
}
// DatahistoryjobresultExists checks if the Datahistoryjobresult row exists.
// It issues a SELECT EXISTS query for the given primary key id and scans the
// boolean result.
func DatahistoryjobresultExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
var exists bool
sql := "select exists(select 1 from \"datahistoryjobresult\" where \"id\"=? limit 1)"
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, sql)
fmt.Fprintln(boil.DebugWriter, iD)
}
row := exec.QueryRowContext(ctx, sql, iD)
err := row.Scan(&exists)
if err != nil {
return false, errors.Wrap(err, "sqlite3: unable to check if datahistoryjobresult exists")
}
return exists, nil
}

View File

@@ -0,0 +1,793 @@
// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package sqlite3
import (
"bytes"
"context"
"reflect"
"testing"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/randomize"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
var (
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
// so force a package dependency in case they don't.
_ = queries.Equal
)
// testDatahistoryjobresults verifies the query factory returns a usable query.
func testDatahistoryjobresults(t *testing.T) {
t.Parallel()
query := Datahistoryjobresults()
if query.Query == nil {
t.Error("expected a query, got nothing")
}
}
// testDatahistoryjobresultsDelete inserts one randomized row, deletes it via
// the struct's Delete method, and verifies the table is empty afterwards.
// All work happens inside a transaction that is rolled back on exit.
func testDatahistoryjobresultsDelete(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := o.Delete(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobresultsQueryDeleteAll inserts one randomized row and
// deletes it through the query-level DeleteAll, then checks the table is empty.
func testDatahistoryjobresultsQueryDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if rowsAff, err := Datahistoryjobresults().DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobresultsSliceDeleteAll inserts one randomized row and
// deletes it via the slice-level DeleteAll, then checks the table is empty.
func testDatahistoryjobresultsSliceDeleteAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobresultSlice{o}
if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only have deleted one row, but affected:", rowsAff)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 0 {
t.Error("want zero records, got:", count)
}
}
// testDatahistoryjobresultsExists inserts one randomized row and verifies
// DatahistoryjobresultExists reports true for its primary key.
func testDatahistoryjobresultsExists(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
e, err := DatahistoryjobresultExists(ctx, tx, o.ID)
if err != nil {
t.Errorf("Unable to check if Datahistoryjobresult exists: %s", err)
}
if !e {
t.Errorf("Expected DatahistoryjobresultExists to return true, but got false.")
}
}
// testDatahistoryjobresultsFind inserts one randomized row and verifies
// FindDatahistoryjobresult returns a non-nil record for its primary key.
func testDatahistoryjobresultsFind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
datahistoryjobresultFound, err := FindDatahistoryjobresult(ctx, tx, o.ID)
if err != nil {
t.Error(err)
}
if datahistoryjobresultFound == nil {
t.Error("want a record, got nil")
}
}
// testDatahistoryjobresultsBind inserts one randomized row and verifies the
// query result can be bound back onto a struct without error.
func testDatahistoryjobresultsBind(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = Datahistoryjobresults().Bind(ctx, tx, o); err != nil {
t.Error(err)
}
}
// testDatahistoryjobresultsOne inserts one randomized row and verifies the
// query's One finisher returns a non-nil record.
func testDatahistoryjobresultsOne(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if x, err := Datahistoryjobresults().One(ctx, tx); err != nil {
t.Error(err)
} else if x == nil {
t.Error("expected to get a non nil record")
}
}
// testDatahistoryjobresultsAll inserts two randomized rows and verifies the
// query's All finisher returns both of them.
func testDatahistoryjobresultsAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
datahistoryjobresultOne := &Datahistoryjobresult{}
datahistoryjobresultTwo := &Datahistoryjobresult{}
if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobresults().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 2 {
t.Error("want 2 records, got:", len(slice))
}
}
// testDatahistoryjobresultsCount inserts two randomized rows and verifies the
// query's Count finisher reports two.
func testDatahistoryjobresultsCount(t *testing.T) {
t.Parallel()
var err error
seed := randomize.NewSeed()
datahistoryjobresultOne := &Datahistoryjobresult{}
datahistoryjobresultTwo := &Datahistoryjobresult{}
if err = randomize.Struct(seed, datahistoryjobresultOne, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if err = randomize.Struct(seed, datahistoryjobresultTwo, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = datahistoryjobresultOne.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = datahistoryjobresultTwo.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 2 {
t.Error("want 2 records, got:", count)
}
}
// The hook functions below each reset the passed object to its zero value.
// A test can then detect that the hook ran by comparing the object against
// an empty struct with reflect.DeepEqual.
func datahistoryjobresultBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
func datahistoryjobresultAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Datahistoryjobresult) error {
*o = Datahistoryjobresult{}
return nil
}
// testDatahistoryjobresultsHooks registers each hook kind in turn, invokes the
// corresponding do*Hooks method, and verifies the hook ran by checking the
// object was zeroed (the registered hooks reset the struct). Each package-level
// hook slice is cleared afterwards so later tests are unaffected.
func testDatahistoryjobresultsHooks(t *testing.T) {
t.Parallel()
var err error
ctx := context.Background()
empty := &Datahistoryjobresult{}
o := &Datahistoryjobresult{}
seed := randomize.NewSeed()
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, false); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult object: %s", err)
}
AddDatahistoryjobresultHook(boil.BeforeInsertHook, datahistoryjobresultBeforeInsertHook)
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeInsertHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterInsertHook, datahistoryjobresultAfterInsertHook)
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterInsertHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterSelectHook, datahistoryjobresultAfterSelectHook)
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterSelectHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.BeforeUpdateHook, datahistoryjobresultBeforeUpdateHook)
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeUpdateHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterUpdateHook, datahistoryjobresultAfterUpdateHook)
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterUpdateHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.BeforeDeleteHook, datahistoryjobresultBeforeDeleteHook)
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeDeleteHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterDeleteHook, datahistoryjobresultAfterDeleteHook)
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterDeleteHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.BeforeUpsertHook, datahistoryjobresultBeforeUpsertHook)
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultBeforeUpsertHooks = []DatahistoryjobresultHook{}
AddDatahistoryjobresultHook(boil.AfterUpsertHook, datahistoryjobresultAfterUpsertHook)
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
}
if !reflect.DeepEqual(o, empty) {
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
}
datahistoryjobresultAfterUpsertHooks = []DatahistoryjobresultHook{}
}
// testDatahistoryjobresultsInsert inserts one randomized row with inferred
// columns and verifies the table count is one.
func testDatahistoryjobresultsInsert(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
// testDatahistoryjobresultsInsertWhitelist inserts a randomized row using an
// explicit whitelist of the non-default columns and verifies the count is one.
func testDatahistoryjobresultsInsertWhitelist(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Whitelist(datahistoryjobresultColumnsWithoutDefault...)); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
}
// testDatahistoryjobresultToOneDatahistoryjobUsingJob verifies the to-one
// relationship from a job result to its parent Datahistoryjob: it checks the
// Job() query helper, slice-based eager loading, and singular eager loading.
func testDatahistoryjobresultToOneDatahistoryjobUsingJob(t *testing.T) {
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var local Datahistoryjobresult
var foreign Datahistoryjob
seed := randomize.NewSeed()
if err := randomize.Struct(seed, &local, datahistoryjobresultDBTypes, false, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if err := randomize.Struct(seed, &foreign, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjob struct: %s", err)
}
if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
// Link the local row to the foreign job before inserting it.
local.JobID = foreign.ID
if err := local.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
check, err := local.Job().One(ctx, tx)
if err != nil {
t.Fatal(err)
}
if check.ID != foreign.ID {
t.Errorf("want: %v, got %v", foreign.ID, check.ID)
}
// Eager load in slice (non-singular) mode.
slice := DatahistoryjobresultSlice{&local}
if err = local.L.LoadJob(ctx, tx, false, (*[]*Datahistoryjobresult)(&slice), nil); err != nil {
t.Fatal(err)
}
if local.R.Job == nil {
t.Error("struct should have been eager loaded")
}
// Eager load again in singular mode after clearing the cached relation.
local.R.Job = nil
if err = local.L.LoadJob(ctx, tx, true, &local, nil); err != nil {
t.Fatal(err)
}
if local.R.Job == nil {
t.Error("struct should have been eager loaded")
}
}
// testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob verifies SetJob:
// for each of two candidate jobs it sets the relationship (inserting the
// second candidate), checks both relationship structs and the foreign key,
// then zeroes the FK and reloads to confirm the value was persisted.
func testDatahistoryjobresultToOneSetOpDatahistoryjobUsingJob(t *testing.T) {
var err error
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
var a Datahistoryjobresult
var b, c Datahistoryjob
seed := randomize.NewSeed()
if err = randomize.Struct(seed, &a, datahistoryjobresultDBTypes, false, strmangle.SetComplement(datahistoryjobresultPrimaryKeyColumns, datahistoryjobresultColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
t.Fatal(err)
}
if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
t.Fatal(err)
}
// First iteration uses already-inserted b; second inserts c via SetJob.
for i, x := range []*Datahistoryjob{&b, &c} {
err = a.SetJob(ctx, tx, i != 0, x)
if err != nil {
t.Fatal(err)
}
if a.R.Job != x {
t.Error("relationship struct not set to correct value")
}
if x.R.JobDatahistoryjobresults[0] != &a {
t.Error("failed to append to foreign relationship struct")
}
if a.JobID != x.ID {
t.Error("foreign key was wrong value", a.JobID)
}
// Zero the FK in memory, then reload to prove the DB holds the new value.
zero := reflect.Zero(reflect.TypeOf(a.JobID))
reflect.Indirect(reflect.ValueOf(&a.JobID)).Set(zero)
if err = a.Reload(ctx, tx); err != nil {
t.Fatal("failed to reload", err)
}
if a.JobID != x.ID {
t.Error("foreign key was wrong value", a.JobID, x.ID)
}
}
}
// testDatahistoryjobresultsReload inserts one randomized row and verifies
// Reload succeeds for it.
func testDatahistoryjobresultsReload(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
if err = o.Reload(ctx, tx); err != nil {
t.Error(err)
}
}
// testDatahistoryjobresultsReloadAll inserts one randomized row and verifies
// slice ReloadAll succeeds for a slice containing it.
func testDatahistoryjobresultsReloadAll(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice := DatahistoryjobresultSlice{o}
if err = slice.ReloadAll(ctx, tx); err != nil {
t.Error(err)
}
}
// testDatahistoryjobresultsSelect inserts one randomized row and verifies a
// plain All() select returns exactly that record.
func testDatahistoryjobresultsSelect(t *testing.T) {
t.Parallel()
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
slice, err := Datahistoryjobresults().All(ctx, tx)
if err != nil {
t.Error(err)
}
if len(slice) != 1 {
t.Error("want one record, got:", len(slice))
}
}
var (
datahistoryjobresultDBTypes = map[string]string{`ID`: `TEXT`, `JobID`: `TEXT`, `Result`: `TEXT`, `Status`: `REAL`, `IntervalStartTime`: `TIMESTAMP`, `IntervalEndTime`: `TIMESTAMP`, `RunTime`: `TIMESTAMP`}
_ = bytes.MinRead
)
// testDatahistoryjobresultsUpdate inserts one randomized row, re-randomizes
// its non-key columns, and verifies Update affects exactly one row. Tables
// without updatable columns are skipped.
func testDatahistoryjobresultsUpdate(t *testing.T) {
t.Parallel()
// Idiomatic comparison order (was Yoda-style `0 == len(...)`), matching
// the sibling length check below.
if len(datahistoryjobresultPrimaryKeyColumns) == 0 {
t.Skip("Skipping table with no primary key columns")
}
if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
// Re-randomize everything except the primary key, then update in place.
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("should only affect one row but affected", rowsAff)
}
}
// testDatahistoryjobresultsSliceUpdateAll inserts one randomized row,
// re-randomizes its non-key columns, builds an update map of every non-key
// field via the struct's boil tags, and verifies slice UpdateAll affects
// exactly one row.
func testDatahistoryjobresultsSliceUpdateAll(t *testing.T) {
t.Parallel()
if len(datahistoryjobresultAllColumns) == len(datahistoryjobresultPrimaryKeyColumns) {
t.Skip("Skipping table with only primary key columns")
}
seed := randomize.NewSeed()
var err error
o := &Datahistoryjobresult{}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultColumnsWithDefault...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
ctx := context.Background()
tx := MustTx(boil.BeginTx(ctx, nil))
defer func() { _ = tx.Rollback() }()
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
t.Error(err)
}
count, err := Datahistoryjobresults().Count(ctx, tx)
if err != nil {
t.Error(err)
}
if count != 1 {
t.Error("want one record, got:", count)
}
if err = randomize.Struct(seed, o, datahistoryjobresultDBTypes, true, datahistoryjobresultPrimaryKeyColumns...); err != nil {
t.Errorf("Unable to randomize Datahistoryjobresult struct: %s", err)
}
// Remove Primary keys and unique columns from what we plan to update
var fields []string
if strmangle.StringSliceMatch(datahistoryjobresultAllColumns, datahistoryjobresultPrimaryKeyColumns) {
fields = datahistoryjobresultAllColumns
} else {
fields = strmangle.SetComplement(
datahistoryjobresultAllColumns,
datahistoryjobresultPrimaryKeyColumns,
)
}
// Map each remaining column name to the struct field value carrying the
// matching `boil` tag.
value := reflect.Indirect(reflect.ValueOf(o))
typ := reflect.TypeOf(o).Elem()
n := typ.NumField()
updateMap := M{}
for _, col := range fields {
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.Tag.Get("boil") == col {
updateMap[col] = value.Field(i).Interface()
}
}
}
slice := DatahistoryjobresultSlice{o}
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
t.Error(err)
} else if rowsAff != 1 {
t.Error("wanted one record updated but got", rowsAff)
}
}

View File

@@ -51,10 +51,12 @@ var ExchangeWhere = struct {
// ExchangeRels names the relationships an Exchange row can eager load;
// these string constants are passed to qm.Load and friends.
var ExchangeRels = struct {
ExchangeNameCandle string
ExchangeNameTrade string
ExchangeNameDatahistoryjobs string
ExchangeNameWithdrawalHistories string
}{
ExchangeNameCandle: "ExchangeNameCandle",
ExchangeNameTrade: "ExchangeNameTrade",
ExchangeNameDatahistoryjobs: "ExchangeNameDatahistoryjobs",
ExchangeNameWithdrawalHistories: "ExchangeNameWithdrawalHistories",
}
@@ -62,6 +64,7 @@ var ExchangeRels = struct {
// exchangeR holds eager-loaded relationship data for an Exchange row;
// fields are populated by the Load* helpers.
type exchangeR struct {
ExchangeNameCandle *Candle
ExchangeNameTrade *Trade
ExchangeNameDatahistoryjobs DatahistoryjobSlice
ExchangeNameWithdrawalHistories WithdrawalHistorySlice
}
@@ -383,6 +386,27 @@ func (o *Exchange) ExchangeNameTrade(mods ...qm.QueryMod) tradeQuery {
return query
}
// ExchangeNameDatahistoryjobs retrieves all the datahistoryjob's Datahistoryjobs with an executor via exchange_name_id column.
// ExchangeNameDatahistoryjobs retrieves all the datahistoryjob's Datahistoryjobs with an executor via exchange_name_id column.
// Additional query mods may be supplied to filter or order the result.
func (o *Exchange) ExchangeNameDatahistoryjobs(mods ...qm.QueryMod) datahistoryjobQuery {
var queryMods []qm.QueryMod
if len(mods) != 0 {
queryMods = append(queryMods, mods...)
}
// Constrain to jobs whose foreign key references this exchange.
queryMods = append(queryMods,
qm.Where("\"datahistoryjob\".\"exchange_name_id\"=?", o.ID),
)
query := Datahistoryjobs(queryMods...)
queries.SetFrom(query.Query, "\"datahistoryjob\"")
// Default to selecting all columns unless the caller chose some.
if len(queries.GetSelect(query.Query)) == 0 {
queries.SetSelect(query.Query, []string{"\"datahistoryjob\".*"})
}
return query
}
// ExchangeNameWithdrawalHistories retrieves all the withdrawal_history's WithdrawalHistories with an executor via exchange_name_id column.
func (o *Exchange) ExchangeNameWithdrawalHistories(mods ...qm.QueryMod) withdrawalHistoryQuery {
var queryMods []qm.QueryMod
@@ -600,6 +624,101 @@ func (exchangeL) LoadExchangeNameTrade(ctx context.Context, e boil.ContextExecut
return nil
}
// LoadExchangeNameDatahistoryjobs allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
// LoadExchangeNameDatahistoryjobs allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
// maybeExchange is either *Exchange (singular) or *[]*Exchange; matched jobs
// are attached to each exchange's R.ExchangeNameDatahistoryjobs and each job's
// R.ExchangeName back-reference is set.
func (exchangeL) LoadExchangeNameDatahistoryjobs(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
var slice []*Exchange
var object *Exchange
if singular {
object = maybeExchange.(*Exchange)
} else {
slice = *maybeExchange.(*[]*Exchange)
}
// Collect the distinct exchange IDs to query against.
args := make([]interface{}, 0, 1)
if singular {
if object.R == nil {
object.R = &exchangeR{}
}
args = append(args, object.ID)
} else {
Outer:
for _, obj := range slice {
if obj.R == nil {
obj.R = &exchangeR{}
}
// Skip IDs already collected to keep the IN clause minimal.
for _, a := range args {
if a == obj.ID {
continue Outer
}
}
args = append(args, obj.ID)
}
}
if len(args) == 0 {
return nil
}
query := NewQuery(qm.From(`datahistoryjob`), qm.WhereIn(`datahistoryjob.exchange_name_id in ?`, args...))
if mods != nil {
mods.Apply(query)
}
results, err := query.QueryContext(ctx, e)
if err != nil {
return errors.Wrap(err, "failed to eager load datahistoryjob")
}
var resultSlice []*Datahistoryjob
if err = queries.Bind(results, &resultSlice); err != nil {
return errors.Wrap(err, "failed to bind eager loaded slice datahistoryjob")
}
if err = results.Close(); err != nil {
return errors.Wrap(err, "failed to close results in eager load on datahistoryjob")
}
if err = results.Err(); err != nil {
return errors.Wrap(err, "error occurred during iteration of eager loaded relations for datahistoryjob")
}
// Run after-select hooks on every eager-loaded job, if any are registered.
if len(datahistoryjobAfterSelectHooks) != 0 {
for _, obj := range resultSlice {
if err := obj.doAfterSelectHooks(ctx, e); err != nil {
return err
}
}
}
if singular {
object.R.ExchangeNameDatahistoryjobs = resultSlice
for _, foreign := range resultSlice {
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.ExchangeName = object
}
return nil
}
// Distribute each loaded job onto the exchange whose ID matches its FK.
for _, foreign := range resultSlice {
for _, local := range slice {
if local.ID == foreign.ExchangeNameID {
local.R.ExchangeNameDatahistoryjobs = append(local.R.ExchangeNameDatahistoryjobs, foreign)
if foreign.R == nil {
foreign.R = &datahistoryjobR{}
}
foreign.R.ExchangeName = local
break
}
}
}
return nil
}
// LoadExchangeNameWithdrawalHistories allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for a 1-M or N-M relationship.
func (exchangeL) LoadExchangeNameWithdrawalHistories(ctx context.Context, e boil.ContextExecutor, singular bool, maybeExchange interface{}, mods queries.Applicator) error {
@@ -797,6 +916,59 @@ func (o *Exchange) SetExchangeNameTrade(ctx context.Context, exec boil.ContextEx
return nil
}
// AddExchangeNameDatahistoryjobs adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameDatahistoryjobs.
// Sets related.R.ExchangeName appropriately.
// AddExchangeNameDatahistoryjobs adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameDatahistoryjobs.
// Sets related.R.ExchangeName appropriately.
func (o *Exchange) AddExchangeNameDatahistoryjobs(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Datahistoryjob) error {
var err error
for _, rel := range related {
if insert {
// New record: point its FK at this exchange and insert it.
rel.ExchangeNameID = o.ID
if err = rel.Insert(ctx, exec, boil.Infer()); err != nil {
return errors.Wrap(err, "failed to insert into foreign table")
}
} else {
// Existing record: update its FK column in the database directly.
updateQuery := fmt.Sprintf(
"UPDATE \"datahistoryjob\" SET %s WHERE %s",
strmangle.SetParamNames("\"", "\"", 0, []string{"exchange_name_id"}),
strmangle.WhereClause("\"", "\"", 0, datahistoryjobPrimaryKeyColumns),
)
values := []interface{}{o.ID, rel.ID}
if boil.DebugMode {
fmt.Fprintln(boil.DebugWriter, updateQuery)
fmt.Fprintln(boil.DebugWriter, values)
}
if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
return errors.Wrap(err, "failed to update foreign table")
}
// Keep the in-memory struct in sync with the database.
rel.ExchangeNameID = o.ID
}
}
if o.R == nil {
o.R = &exchangeR{
ExchangeNameDatahistoryjobs: related,
}
} else {
o.R.ExchangeNameDatahistoryjobs = append(o.R.ExchangeNameDatahistoryjobs, related...)
}
// Set the back-reference on each related job.
for _, rel := range related {
if rel.R == nil {
rel.R = &datahistoryjobR{
ExchangeName: o,
}
} else {
rel.R.ExchangeName = o
}
}
return nil
}
// AddExchangeNameWithdrawalHistories adds the given related objects to the existing relationships
// of the exchange, optionally inserting them as new records.
// Appends related to o.R.ExchangeNameWithdrawalHistories.

View File

@@ -719,6 +719,84 @@ func testExchangeOneToOneSetOpTradeUsingExchangeNameTrade(t *testing.T) {
}
}
// testExchangeToManyExchangeNameDatahistoryjobs verifies the to-many
// relationship between Exchange and Datahistoryjob: the relationship
// query, and eager loading in both slice (singular=false) and single
// object (singular=true) modes. Runs inside a rolled-back transaction.
func testExchangeToManyExchangeNameDatahistoryjobs(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	var a Exchange
	var b, c Datahistoryjob

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, exchangeDBTypes, true, exchangeColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Exchange struct: %s", err)
	}

	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}

	if err = randomize.Struct(seed, &b, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}
	if err = randomize.Struct(seed, &c, datahistoryjobDBTypes, false, datahistoryjobColumnsWithDefault...); err != nil {
		t.Fatal(err)
	}

	// Point both jobs at the parent exchange.
	b.ExchangeNameID = a.ID
	c.ExchangeNameID = a.ID
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}

	check, err := a.ExchangeNameDatahistoryjobs().All(ctx, tx)
	if err != nil {
		t.Fatal(err)
	}

	// NOTE(review): b and c share the same ExchangeNameID, so these
	// comparisons cannot distinguish the two rows; comparing primary
	// keys would be a stronger assertion (generated code, left as-is).
	bFound, cFound := false, false
	for _, v := range check {
		if v.ExchangeNameID == b.ExchangeNameID {
			bFound = true
		}
		if v.ExchangeNameID == c.ExchangeNameID {
			cFound = true
		}
	}

	if !bFound {
		t.Error("expected to find b")
	}
	if !cFound {
		t.Error("expected to find c")
	}

	// Eager load via a slice of parents (singular=false).
	slice := ExchangeSlice{&a}
	if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, false, (*[]*Exchange)(&slice), nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}

	// Eager load via a single parent (singular=true), after clearing the cache.
	a.R.ExchangeNameDatahistoryjobs = nil
	if err = a.L.LoadExchangeNameDatahistoryjobs(ctx, tx, true, &a, nil); err != nil {
		t.Fatal(err)
	}
	if got := len(a.R.ExchangeNameDatahistoryjobs); got != 2 {
		t.Error("number of eager loaded records wrong, got:", got)
	}

	if t.Failed() {
		t.Logf("%#v", check)
	}
}
func testExchangeToManyExchangeNameWithdrawalHistories(t *testing.T) {
var err error
ctx := context.Background()
@@ -797,6 +875,81 @@ func testExchangeToManyExchangeNameWithdrawalHistories(t *testing.T) {
}
}
// testExchangeToManyAddOpExchangeNameDatahistoryjobs verifies
// Exchange.AddExchangeNameDatahistoryjobs in both modes: attaching
// already-inserted rows (b, c) and inserting new rows on the fly (d, e).
// It checks foreign keys, both relationship caches, and the row count
// after each batch. Runs inside a rolled-back transaction.
func testExchangeToManyAddOpExchangeNameDatahistoryjobs(t *testing.T) {
	var err error
	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()

	var a Exchange
	var b, c, d, e Datahistoryjob

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, &a, exchangeDBTypes, false, strmangle.SetComplement(exchangePrimaryKeyColumns, exchangeColumnsWithoutDefault)...); err != nil {
		t.Fatal(err)
	}
	foreigners := []*Datahistoryjob{&b, &c, &d, &e}
	for _, x := range foreigners {
		if err = randomize.Struct(seed, x, datahistoryjobDBTypes, false, strmangle.SetComplement(datahistoryjobPrimaryKeyColumns, datahistoryjobColumnsWithoutDefault)...); err != nil {
			t.Fatal(err)
		}
	}

	if err := a.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	// Only b and c are pre-inserted; d and e are inserted by the second
	// Add call below (insert=true).
	if err = b.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}
	if err = c.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Fatal(err)
	}

	foreignersSplitByInsertion := [][]*Datahistoryjob{
		{&b, &c},
		{&d, &e},
	}

	for i, x := range foreignersSplitByInsertion {
		// First batch (i == 0) attaches existing rows; second batch inserts.
		err = a.AddExchangeNameDatahistoryjobs(ctx, tx, i != 0, x...)
		if err != nil {
			t.Fatal(err)
		}

		first := x[0]
		second := x[1]

		if a.ID != first.ExchangeNameID {
			t.Error("foreign key was wrong value", a.ID, first.ExchangeNameID)
		}
		if a.ID != second.ExchangeNameID {
			t.Error("foreign key was wrong value", a.ID, second.ExchangeNameID)
		}

		if first.R.ExchangeName != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}
		if second.R.ExchangeName != &a {
			t.Error("relationship was not added properly to the foreign slice")
		}

		if a.R.ExchangeNameDatahistoryjobs[i*2] != first {
			t.Error("relationship struct slice not set to correct value")
		}
		if a.R.ExchangeNameDatahistoryjobs[i*2+1] != second {
			t.Error("relationship struct slice not set to correct value")
		}

		count, err := a.ExchangeNameDatahistoryjobs().Count(ctx, tx)
		if err != nil {
			t.Fatal(err)
		}
		if want := int64((i + 1) * 2); count != want {
			t.Error("want", want, "got", count)
		}
	}
}
func testExchangeToManyAddOpExchangeNameWithdrawalHistories(t *testing.T) {
var err error

View File

@@ -63,28 +63,6 @@ var TradeColumns = struct {
}
// Generated where
// whereHelpernull_String generates query mods for filtering on a
// nullable string column.
type whereHelpernull_String struct{ field string }

// EQ matches rows where the column equals x (NULL-aware equality).
func (w whereHelpernull_String) EQ(x null.String) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, false, x)
}

// NEQ matches rows where the column does not equal x (NULL-aware inequality).
func (w whereHelpernull_String) NEQ(x null.String) qm.QueryMod {
	return qmhelper.WhereNullEQ(w.field, true, x)
}

// IsNull matches rows where the column is NULL.
func (w whereHelpernull_String) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) }

// IsNotNull matches rows where the column is not NULL.
func (w whereHelpernull_String) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) }

// LT matches rows where the column is less than x.
func (w whereHelpernull_String) LT(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LT, x)
}

// LTE matches rows where the column is less than or equal to x.
func (w whereHelpernull_String) LTE(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LTE, x)
}

// GT matches rows where the column is greater than x.
func (w whereHelpernull_String) GT(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GT, x)
}

// GTE matches rows where the column is greater than or equal to x.
func (w whereHelpernull_String) GTE(x null.String) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GTE, x)
}
var TradeWhere = struct {
ID whereHelperstring

View File

@@ -80,7 +80,7 @@ func Series(exchangeName, base, quote string, interval int64, asset string, star
}
}
if len(out.Candles) < 1 {
return out, fmt.Errorf(errNoCandleDataFound, exchangeName, base, quote, interval, asset)
return out, fmt.Errorf("%w: %s %s %s %v %s", ErrNoCandleDataFound, exchangeName, base, quote, interval, asset)
}
out.ExchangeID = exchangeName

View File

@@ -242,10 +242,8 @@ func TestSeries(t *testing.T) {
}
ret, err = Series("", "", "", 0, "", start, end)
if err != nil {
if !errors.Is(err, errInvalidInput) {
t.Fatal(err)
}
if !errors.Is(err, errInvalidInput) {
t.Fatal(err)
}
ret, err = Series(testExchanges[0].Name,
@@ -254,9 +252,7 @@ func TestSeries(t *testing.T) {
start, end)
if err != nil {
if !errors.Is(err, errInvalidInput) {
if err.Error() != fmt.Errorf(errNoCandleDataFound, testExchanges[0].Name,
"BTC", "MOON",
"864000", "spot").Error() {
if !errors.Is(err, ErrNoCandleDataFound) {
t.Fatal(err)
}
}

View File

@@ -5,13 +5,11 @@ import (
"time"
)
const (
errNoCandleDataFound = "no candle data found: %v %v %v %v %v"
)
var (
	// errInvalidInput returned when a required query argument is missing
	errInvalidInput = errors.New("exchange, base, quote, asset, interval, start & end cannot be empty")
	// errNoCandleData returned when no candle data is supplied to save
	errNoCandleData = errors.New("no candle data provided")
	// ErrNoCandleDataFound returns when no candle data is found
	ErrNoCandleDataFound = errors.New("no candle data found")
)
// Item generic candle holder for modelPSQL & modelSQLite

View File

@@ -0,0 +1,694 @@
package datahistoryjob
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/models/postgres"
"github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjobresult"
"github.com/thrasher-corp/gocryptotrader/log"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries/qm"
)
// Setup validates the supplied database instance and returns a DBService
// bound to its SQL connection and configured driver. Returns an error
// when the instance is nil, disconnected, or the connection cannot be
// retrieved.
func Setup(db database.IDatabase) (*DBService, error) {
	if db == nil {
		return nil, database.ErrNilInstance
	}
	if !db.IsConnected() {
		return nil, database.ErrDatabaseNotConnected
	}
	cfg := db.GetConfig()
	conn, err := db.GetSQL()
	if err != nil {
		return nil, err
	}
	svc := &DBService{
		sql:    conn,
		driver: cfg.Driver,
	}
	return svc, nil
}
// Upsert inserts or updates jobs into the database within a single
// transaction. The transaction is rolled back on any failure, including
// an unsupported driver. A no-op when no jobs are supplied, consistent
// with datahistoryjobresult's Upsert.
func (db *DBService) Upsert(jobs ...*DataHistoryJob) error {
	if len(jobs) == 0 {
		return nil
	}
	ctx := context.Background()
	tx, err := db.sql.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginTx %w", err)
	}
	defer func() {
		// Roll back on any error so the transaction is never leaked.
		if err != nil {
			errRB := tx.Rollback()
			if errRB != nil {
				log.Errorf(log.DatabaseMgr, "Insert tx.Rollback %v", errRB)
			}
		}
	}()
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite:
		err = upsertSqlite(ctx, tx, jobs...)
	case database.DBPostgreSQL:
		err = upsertPostgres(ctx, tx, jobs...)
	default:
		// Assign rather than return directly: the deferred rollback only
		// fires when err is non-nil, so returning here previously leaked
		// the open transaction.
		err = database.ErrNoDatabaseProvided
	}
	if err != nil {
		return err
	}
	return tx.Commit()
}
// GetByNickName returns a job by its nickname, dispatching to the
// driver-specific implementation.
func (db *DBService) GetByNickName(nickname string) (*DataHistoryJob, error) {
	switch db.driver {
	case database.DBPostgreSQL:
		return db.getByNicknamePostgres(nickname)
	case database.DBSQLite, database.DBSQLite3:
		return db.getByNicknameSQLite(nickname)
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetByID returns a job by its id, dispatching to the driver-specific
// implementation.
func (db *DBService) GetByID(id string) (*DataHistoryJob, error) {
	switch db.driver {
	case database.DBPostgreSQL:
		return db.getByIDPostgres(id)
	case database.DBSQLite, database.DBSQLite3:
		return db.getByIDSQLite(id)
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetJobsBetween will return all jobs created between two dates,
// dispatching to the driver-specific implementation.
func (db *DBService) GetJobsBetween(startDate, endDate time.Time) ([]DataHistoryJob, error) {
	switch db.driver {
	case database.DBPostgreSQL:
		return db.getJobsBetweenPostgres(startDate, endDate)
	case database.DBSQLite, database.DBSQLite3:
		return db.getJobsBetweenSQLite(startDate, endDate)
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetAllIncompleteJobsAndResults returns all jobs that have the status
// "active", dispatching to the driver-specific implementation.
func (db *DBService) GetAllIncompleteJobsAndResults() ([]DataHistoryJob, error) {
	switch db.driver {
	case database.DBPostgreSQL:
		return db.getAllIncompleteJobsAndResultsPostgres()
	case database.DBSQLite, database.DBSQLite3:
		return db.getAllIncompleteJobsAndResultsSQLite()
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetJobAndAllResults returns a job and joins all its job results,
// dispatching to the driver-specific implementation.
func (db *DBService) GetJobAndAllResults(nickname string) (*DataHistoryJob, error) {
	switch db.driver {
	case database.DBPostgreSQL:
		return db.getJobAndAllResultsPostgres(nickname)
	case database.DBSQLite, database.DBSQLite3:
		return db.getJobAndAllResultsSQLite(nickname)
	}
	return nil, database.ErrNoDatabaseProvided
}
// upsertSqlite writes jobs within the supplied transaction using the
// sqlite3 models. Times are stored as RFC3339 strings.
// NOTE(review): despite the Upsert name this performs a plain Insert —
// re-submitting an existing nickname will add another row rather than
// update it; confirm this is the intended SQLite behaviour.
func upsertSqlite(ctx context.Context, tx *sql.Tx, jobs ...*DataHistoryJob) error {
	for i := range jobs {
		// Resolve the exchange's ID from its lower-cased name.
		r, err := sqlite3.Exchanges(
			qm.Where("name = ?", strings.ToLower(jobs[i].ExchangeName))).One(ctx, tx)
		if err != nil {
			return err
		}
		// Map the DTO onto the generated model; numeric fields are stored
		// as float64 by the sqlite3 models.
		var tempEvent = sqlite3.Datahistoryjob{
			ID:             jobs[i].ID,
			ExchangeNameID: r.ID,
			Nickname:       strings.ToLower(jobs[i].Nickname),
			Asset:          strings.ToLower(jobs[i].Asset),
			Base:           strings.ToUpper(jobs[i].Base),
			Quote:          strings.ToUpper(jobs[i].Quote),
			StartTime:      jobs[i].StartDate.UTC().Format(time.RFC3339),
			EndTime:        jobs[i].EndDate.UTC().Format(time.RFC3339),
			Interval:       float64(jobs[i].Interval),
			DataType:       float64(jobs[i].DataType),
			RequestSize:    float64(jobs[i].RequestSizeLimit),
			MaxRetries:     float64(jobs[i].MaxRetryAttempts),
			BatchCount:     float64(jobs[i].BatchSize),
			Status:         float64(jobs[i].Status),
			Created:        time.Now().UTC().Format(time.RFC3339),
		}
		err = tempEvent.Insert(ctx, tx, boil.Infer())
		if err != nil {
			return err
		}
	}
	return nil
}
// upsertPostgres writes jobs within the supplied transaction using the
// postgres models, performing a true upsert that updates on conflict
// with the nickname column.
func upsertPostgres(ctx context.Context, tx *sql.Tx, jobs ...*DataHistoryJob) error {
	for i := range jobs {
		// Resolve the exchange's ID from its lower-cased name.
		r, err := postgres.Exchanges(
			qm.Where("name = ?", strings.ToLower(jobs[i].ExchangeName))).One(ctx, tx)
		if err != nil {
			return err
		}
		// Map the DTO onto the generated model; numeric fields are stored
		// as float64 by the postgres models.
		var tempEvent = postgres.Datahistoryjob{
			ID:             jobs[i].ID,
			Nickname:       strings.ToLower(jobs[i].Nickname),
			ExchangeNameID: r.ID,
			Asset:          strings.ToLower(jobs[i].Asset),
			Base:           strings.ToUpper(jobs[i].Base),
			Quote:          strings.ToUpper(jobs[i].Quote),
			StartTime:      jobs[i].StartDate.UTC(),
			EndTime:        jobs[i].EndDate.UTC(),
			Interval:       float64(jobs[i].Interval),
			DataType:       float64(jobs[i].DataType),
			BatchCount:     float64(jobs[i].BatchSize),
			RequestSize:    float64(jobs[i].RequestSizeLimit),
			MaxRetries:     float64(jobs[i].MaxRetryAttempts),
			Status:         float64(jobs[i].Status),
			Created:        time.Now().UTC(),
		}
		// On nickname conflict, update the existing row with the new values.
		err = tempEvent.Upsert(ctx, tx, true, []string{"nickname"}, boil.Infer(), boil.Infer())
		if err != nil {
			return err
		}
	}
	return nil
}
// getByNicknameSQLite looks up a single job by (lower-cased) nickname
// using the sqlite3 models and maps it to the DTO, parsing the RFC3339
// string timestamps back into time.Time.
func (db *DBService) getByNicknameSQLite(nickname string) (*DataHistoryJob, error) {
	var job *DataHistoryJob
	result, err := sqlite3.Datahistoryjobs(qm.Where("nickname = ?", strings.ToLower(nickname))).One(context.Background(), db.sql)
	if err != nil {
		return job, err
	}
	// Fetch the owning exchange to report its name rather than its ID.
	exchangeResult, err := result.ExchangeName().One(context.Background(), db.sql)
	if err != nil {
		return job, err
	}
	ts, err := time.Parse(time.RFC3339, result.StartTime)
	if err != nil {
		return nil, err
	}
	te, err := time.Parse(time.RFC3339, result.EndTime)
	if err != nil {
		return nil, err
	}
	c, err := time.Parse(time.RFC3339, result.Created)
	if err != nil {
		return nil, err
	}
	job = &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     exchangeResult.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        ts,
		EndDate:          te,
		Interval:         int64(result.Interval),
		BatchSize:        int64(result.BatchCount),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		Status:           int64(result.Status),
		CreatedDate:      c,
	}
	return job, nil
}
// getByNicknamePostgres looks up a single job by (lower-cased) nickname
// using the postgres models and maps it to the DTO. Postgres stores
// native timestamps, so no string parsing is required.
func (db *DBService) getByNicknamePostgres(nickname string) (*DataHistoryJob, error) {
	result, err := postgres.Datahistoryjobs(
		qm.Where("nickname = ?", strings.ToLower(nickname))).One(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	// Fetch the owning exchange to report its name rather than its ID.
	exchangeResult, err := result.ExchangeName().One(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	return &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     exchangeResult.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        result.StartTime,
		EndDate:          result.EndTime,
		Interval:         int64(result.Interval),
		BatchSize:        int64(result.BatchCount),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		Status:           int64(result.Status),
		CreatedDate:      result.Created,
	}, nil
}
// getByIDSQLite looks up a single job by id using the sqlite3 models
// and maps it to the DTO, parsing the RFC3339 string timestamps back
// into time.Time.
func (db *DBService) getByIDSQLite(id string) (*DataHistoryJob, error) {
	var job *DataHistoryJob
	result, err := sqlite3.Datahistoryjobs(qm.Where("id = ?", id)).One(context.Background(), db.sql)
	if err != nil {
		return job, err
	}
	// Fetch the owning exchange to report its name rather than its ID.
	exchangeResult, err := result.ExchangeName().One(context.Background(), db.sql)
	if err != nil {
		return job, err
	}
	ts, err := time.Parse(time.RFC3339, result.StartTime)
	if err != nil {
		return nil, err
	}
	te, err := time.Parse(time.RFC3339, result.EndTime)
	if err != nil {
		return nil, err
	}
	c, err := time.Parse(time.RFC3339, result.Created)
	if err != nil {
		return nil, err
	}
	job = &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     exchangeResult.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        ts,
		EndDate:          te,
		Interval:         int64(result.Interval),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		BatchSize:        int64(result.BatchCount),
		Status:           int64(result.Status),
		CreatedDate:      c,
	}
	return job, nil
}
// getByIDPostgres looks up a single job by id using the postgres models
// and maps it to the DTO. Postgres stores native timestamps, so no
// string parsing is required.
func (db *DBService) getByIDPostgres(id string) (*DataHistoryJob, error) {
	result, err := postgres.Datahistoryjobs(
		qm.Where("id = ?", id)).One(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	// Fetch the owning exchange to report its name rather than its ID.
	exchangeResult, err := result.ExchangeName().One(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	return &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     exchangeResult.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        result.StartTime,
		EndDate:          result.EndTime,
		Interval:         int64(result.Interval),
		BatchSize:        int64(result.BatchCount),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		Status:           int64(result.Status),
		CreatedDate:      result.Created,
	}, nil
}
// getJobsBetweenSQLite returns all jobs whose created timestamp falls
// between startDate and endDate (compared as RFC3339 strings) using the
// sqlite3 models.
func (db *DBService) getJobsBetweenSQLite(startDate, endDate time.Time) ([]DataHistoryJob, error) {
	var jobs []DataHistoryJob
	query := sqlite3.Datahistoryjobs(qm.Where("created BETWEEN ? AND ? ", startDate.UTC().Format(time.RFC3339), endDate.UTC().Format(time.RFC3339)))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return jobs, err
	}
	for i := range results {
		// NOTE(review): the extra id filter duplicates the foreign-key
		// condition the generated ExchangeName() relationship already
		// applies — presumably harmless; confirm and simplify.
		exchangeResult, err := results[i].ExchangeName(qm.Where("id = ?", results[i].ExchangeNameID)).One(context.Background(), db.sql)
		if err != nil {
			return nil, err
		}
		ts, err := time.Parse(time.RFC3339, results[i].StartTime)
		if err != nil {
			return nil, err
		}
		te, err := time.Parse(time.RFC3339, results[i].EndTime)
		if err != nil {
			return nil, err
		}
		c, err := time.Parse(time.RFC3339, results[i].Created)
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               results[i].ID,
			Nickname:         results[i].Nickname,
			ExchangeID:       results[i].ExchangeNameID,
			ExchangeName:     exchangeResult.Name,
			Asset:            results[i].Asset,
			Base:             results[i].Base,
			Quote:            results[i].Quote,
			StartDate:        ts,
			EndDate:          te,
			Interval:         int64(results[i].Interval),
			RequestSizeLimit: int64(results[i].RequestSize),
			BatchSize:        int64(results[i].BatchCount),
			DataType:         int64(results[i].DataType),
			MaxRetryAttempts: int64(results[i].MaxRetries),
			Status:           int64(results[i].Status),
			CreatedDate:      c,
		})
	}
	return jobs, nil
}
// getJobsBetweenPostgres returns all jobs whose created timestamp falls
// between startDate and endDate using the postgres models.
func (db *DBService) getJobsBetweenPostgres(startDate, endDate time.Time) ([]DataHistoryJob, error) {
	var jobs []DataHistoryJob
	query := postgres.Datahistoryjobs(qm.Where("created BETWEEN ? AND ? ", startDate, endDate))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return jobs, err
	}
	for i := range results {
		// NOTE(review): the extra id filter duplicates the foreign-key
		// condition the generated ExchangeName() relationship already
		// applies — presumably harmless; confirm and simplify.
		exchangeResult, err := results[i].ExchangeName(qm.Where("id = ?", results[i].ExchangeNameID)).One(context.Background(), db.sql)
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               results[i].ID,
			Nickname:         results[i].Nickname,
			ExchangeID:       results[i].ExchangeNameID,
			ExchangeName:     exchangeResult.Name,
			Asset:            results[i].Asset,
			Base:             results[i].Base,
			Quote:            results[i].Quote,
			StartDate:        results[i].StartTime,
			EndDate:          results[i].EndTime,
			Interval:         int64(results[i].Interval),
			BatchSize:        int64(results[i].BatchCount),
			RequestSizeLimit: int64(results[i].RequestSize),
			DataType:         int64(results[i].DataType),
			MaxRetryAttempts: int64(results[i].MaxRetries),
			Status:           int64(results[i].Status),
			CreatedDate:      results[i].Created,
		})
	}
	return jobs, nil
}
// getJobAndAllResultsSQLite returns a job by (lower-cased) nickname with
// its related exchange and all job results eager loaded, using the
// sqlite3 models. RFC3339 string timestamps are parsed back into
// time.Time for both the job and each result.
func (db *DBService) getJobAndAllResultsSQLite(nickname string) (*DataHistoryJob, error) {
	var job *DataHistoryJob
	query := sqlite3.Datahistoryjobs(
		qm.Load(sqlite3.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Load(sqlite3.DatahistoryjobRels.ExchangeName),
		qm.Where("nickname = ?", strings.ToLower(nickname)))
	result, err := query.One(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	// Convert each eager-loaded result row into the result DTO.
	var jobResults []*datahistoryjobresult.DataHistoryJobResult
	for i := range result.R.JobDatahistoryjobresults {
		var start, end, run time.Time
		start, err = time.Parse(time.RFC3339, result.R.JobDatahistoryjobresults[i].IntervalStartTime)
		if err != nil {
			return nil, err
		}
		end, err = time.Parse(time.RFC3339, result.R.JobDatahistoryjobresults[i].IntervalEndTime)
		if err != nil {
			return nil, err
		}
		run, err = time.Parse(time.RFC3339, result.R.JobDatahistoryjobresults[i].RunTime)
		if err != nil {
			return nil, err
		}
		jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
			ID:                result.R.JobDatahistoryjobresults[i].ID,
			JobID:             result.R.JobDatahistoryjobresults[i].JobID,
			IntervalStartDate: start,
			IntervalEndDate:   end,
			Status:            int64(result.R.JobDatahistoryjobresults[i].Status),
			Result:            result.R.JobDatahistoryjobresults[i].Result.String,
			Date:              run,
		})
	}
	start, err := time.Parse(time.RFC3339, result.StartTime)
	if err != nil {
		return nil, err
	}
	end, err := time.Parse(time.RFC3339, result.EndTime)
	if err != nil {
		return nil, err
	}
	created, err := time.Parse(time.RFC3339, result.Created)
	if err != nil {
		return nil, err
	}
	job = &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     result.R.ExchangeName.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        start,
		EndDate:          end,
		Interval:         int64(result.Interval),
		BatchSize:        int64(result.BatchCount),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		Status:           int64(result.Status),
		CreatedDate:      created,
		Results:          jobResults,
	}
	return job, nil
}
// getJobAndAllResultsPostgres returns a job by (lower-cased) nickname
// with its related exchange and all job results eager loaded, using the
// postgres models. Postgres stores native timestamps, so no string
// parsing is required.
func (db *DBService) getJobAndAllResultsPostgres(nickname string) (*DataHistoryJob, error) {
	var job *DataHistoryJob
	query := postgres.Datahistoryjobs(
		qm.Load(postgres.DatahistoryjobRels.ExchangeName),
		qm.Load(postgres.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Where("nickname = ?", strings.ToLower(nickname)))
	result, err := query.One(context.Background(), db.sql)
	if err != nil {
		return job, err
	}
	// Convert each eager-loaded result row into the result DTO.
	var jobResults []*datahistoryjobresult.DataHistoryJobResult
	for i := range result.R.JobDatahistoryjobresults {
		jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
			ID:                result.R.JobDatahistoryjobresults[i].ID,
			JobID:             result.R.JobDatahistoryjobresults[i].JobID,
			IntervalStartDate: result.R.JobDatahistoryjobresults[i].IntervalStartTime,
			IntervalEndDate:   result.R.JobDatahistoryjobresults[i].IntervalEndTime,
			Status:            int64(result.R.JobDatahistoryjobresults[i].Status),
			Result:            result.R.JobDatahistoryjobresults[i].Result.String,
			Date:              result.R.JobDatahistoryjobresults[i].RunTime,
		})
	}
	job = &DataHistoryJob{
		ID:               result.ID,
		Nickname:         result.Nickname,
		ExchangeID:       result.ExchangeNameID,
		ExchangeName:     result.R.ExchangeName.Name,
		Asset:            result.Asset,
		Base:             result.Base,
		Quote:            result.Quote,
		StartDate:        result.StartTime,
		EndDate:          result.EndTime,
		Interval:         int64(result.Interval),
		BatchSize:        int64(result.BatchCount),
		RequestSizeLimit: int64(result.RequestSize),
		DataType:         int64(result.DataType),
		MaxRetryAttempts: int64(result.MaxRetries),
		Status:           int64(result.Status),
		CreatedDate:      result.Created,
		Results:          jobResults,
	}
	return job, nil
}
// getAllIncompleteJobsAndResultsSQLite returns every job with status 0
// ("active" per the exported wrapper's documentation) along with its
// exchange and all job results, using the sqlite3 models. RFC3339
// string timestamps are parsed back into time.Time throughout.
func (db *DBService) getAllIncompleteJobsAndResultsSQLite() ([]DataHistoryJob, error) {
	var jobs []DataHistoryJob
	query := sqlite3.Datahistoryjobs(
		qm.Load(sqlite3.DatahistoryjobRels.ExchangeName),
		qm.Load(sqlite3.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Where("status = ?", 0))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return jobs, err
	}
	for i := range results {
		// Convert each eager-loaded result row into the result DTO.
		var jobResults []*datahistoryjobresult.DataHistoryJobResult
		for j := range results[i].R.JobDatahistoryjobresults {
			var start, end, run time.Time
			start, err = time.Parse(time.RFC3339, results[i].R.JobDatahistoryjobresults[j].IntervalStartTime)
			if err != nil {
				return nil, err
			}
			end, err = time.Parse(time.RFC3339, results[i].R.JobDatahistoryjobresults[j].IntervalEndTime)
			if err != nil {
				return nil, err
			}
			run, err = time.Parse(time.RFC3339, results[i].R.JobDatahistoryjobresults[j].RunTime)
			if err != nil {
				return nil, err
			}
			jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
				ID:                results[i].R.JobDatahistoryjobresults[j].ID,
				JobID:             results[i].R.JobDatahistoryjobresults[j].JobID,
				IntervalStartDate: start,
				IntervalEndDate:   end,
				Status:            int64(results[i].R.JobDatahistoryjobresults[j].Status),
				Result:            results[i].R.JobDatahistoryjobresults[j].Result.String,
				Date:              run,
			})
		}
		start, err := time.Parse(time.RFC3339, results[i].StartTime)
		if err != nil {
			return nil, err
		}
		end, err := time.Parse(time.RFC3339, results[i].EndTime)
		if err != nil {
			return nil, err
		}
		created, err := time.Parse(time.RFC3339, results[i].Created)
		if err != nil {
			return nil, err
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               results[i].ID,
			Nickname:         results[i].Nickname,
			ExchangeID:       results[i].ExchangeNameID,
			ExchangeName:     results[i].R.ExchangeName.Name,
			Asset:            results[i].Asset,
			Base:             results[i].Base,
			Quote:            results[i].Quote,
			StartDate:        start,
			EndDate:          end,
			Interval:         int64(results[i].Interval),
			BatchSize:        int64(results[i].BatchCount),
			RequestSizeLimit: int64(results[i].RequestSize),
			DataType:         int64(results[i].DataType),
			MaxRetryAttempts: int64(results[i].MaxRetries),
			Status:           int64(results[i].Status),
			CreatedDate:      created,
			Results:          jobResults,
		})
	}
	return jobs, nil
}
// getAllIncompleteJobsAndResultsPostgres returns every job with status 0
// ("active" per the exported wrapper's documentation) along with its
// exchange and all job results, using the postgres models.
func (db *DBService) getAllIncompleteJobsAndResultsPostgres() ([]DataHistoryJob, error) {
	var jobs []DataHistoryJob
	query := postgres.Datahistoryjobs(
		qm.Load(postgres.DatahistoryjobRels.ExchangeName),
		qm.Load(postgres.DatahistoryjobRels.JobDatahistoryjobresults),
		qm.Where("status = ?", 0))
	results, err := query.All(context.Background(), db.sql)
	if err != nil {
		return jobs, err
	}
	for i := range results {
		// Convert each eager-loaded result row into the result DTO.
		var jobResults []*datahistoryjobresult.DataHistoryJobResult
		for j := range results[i].R.JobDatahistoryjobresults {
			jobResults = append(jobResults, &datahistoryjobresult.DataHistoryJobResult{
				ID:                results[i].R.JobDatahistoryjobresults[j].ID,
				JobID:             results[i].R.JobDatahistoryjobresults[j].JobID,
				IntervalStartDate: results[i].R.JobDatahistoryjobresults[j].IntervalStartTime,
				IntervalEndDate:   results[i].R.JobDatahistoryjobresults[j].IntervalEndTime,
				Status:            int64(results[i].R.JobDatahistoryjobresults[j].Status),
				Result:            results[i].R.JobDatahistoryjobresults[j].Result.String,
				Date:              results[i].R.JobDatahistoryjobresults[j].RunTime,
			})
		}
		jobs = append(jobs, DataHistoryJob{
			ID:               results[i].ID,
			Nickname:         results[i].Nickname,
			ExchangeID:       results[i].ExchangeNameID,
			ExchangeName:     results[i].R.ExchangeName.Name,
			Asset:            results[i].Asset,
			Base:             results[i].Base,
			Quote:            results[i].Quote,
			StartDate:        results[i].StartTime,
			EndDate:          results[i].EndTime,
			Interval:         int64(results[i].Interval),
			BatchSize:        int64(results[i].BatchCount),
			RequestSizeLimit: int64(results[i].RequestSize),
			DataType:         int64(results[i].DataType),
			MaxRetryAttempts: int64(results[i].MaxRetries),
			Status:           int64(results[i].Status),
			CreatedDate:      results[i].Created,
			Results:          jobResults,
		})
	}
	return jobs, nil
}

View File

@@ -0,0 +1,212 @@
package datahistoryjob
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"testing"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/database/testhelpers"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
)
var (
	// verbose toggles extra database test output; flip to true when debugging
	verbose = false
	// testExchanges is seeded into the exchange table before the job tests run
	testExchanges = []exchange.Details{
		{
			Name: "one",
		},
		{
			Name: "two",
		},
	}
)
// TestMain configures the test database connection details and a
// temporary directory for SQLite databases, runs the suite, then removes
// the temporary directory before exiting with the suite's status code.
func TestMain(m *testing.M) {
	if verbose {
		testhelpers.EnableVerboseTestOutput()
	}
	var err error
	testhelpers.PostgresTestDatabase = testhelpers.GetConnectionDetails()
	testhelpers.TempDir, err = ioutil.TempDir("", "gct-temp")
	if err != nil {
		log.Fatal(err)
	}
	t := m.Run()
	err = os.RemoveAll(testhelpers.TempDir)
	if err != nil {
		// Best-effort cleanup; report but do not alter the exit code.
		// (Added the missing trailing newline so the message does not run
		// into subsequent output.)
		fmt.Printf("Failed to remove temp db file: %v\n", err)
	}
	os.Exit(t)
}
// seedDB inserts the test exchanges and records the UUID the database
// generated for each, so later jobs can reference them.
func seedDB() error {
	if err := exchange.InsertMany(testExchanges); err != nil {
		return err
	}
	for i := range testExchanges {
		details, err := exchange.One(testExchanges[i].Name)
		if err != nil {
			return err
		}
		testExchanges[i].UUID = details.UUID
	}
	return nil
}
// TestDataHistoryJob exercises the data history job repository against
// each configured database driver: upserting new jobs, upserting the
// same nicknames again to test conflict resolution, and the various
// retrieval queries.
func TestDataHistoryJob(t *testing.T) {
	// Unused runner/closer fields removed from the case struct.
	testCases := []struct {
		name   string
		config *database.Config
		seedDB func() error
	}{
		{
			name:   "postgresql",
			config: testhelpers.PostgresTestDatabase,
			seedDB: seedDB,
		},
		{
			name: "SQLite",
			config: &database.Config{
				Driver:            database.DBSQLite3,
				ConnectionDetails: drivers.ConnectionDetails{Database: "./testdb"},
			},
			seedDB: seedDB,
		},
	}
	for x := range testCases {
		test := testCases[x]
		t.Run(test.name, func(t *testing.T) {
			if !testhelpers.CheckValidConfig(&test.config.ConnectionDetails) {
				t.Skip("database not configured skipping test")
			}
			dbConn, err := testhelpers.ConnectToDatabase(test.config)
			if err != nil {
				t.Fatal(err)
			}
			if test.seedDB != nil {
				err = test.seedDB()
				if err != nil {
					t.Error(err)
				}
			}
			db, err := Setup(dbConn)
			if err != nil {
				// Was log.Fatal, which aborts the whole test binary and
				// skips cleanup of sibling subtests; t.Fatal fails only
				// this subtest.
				t.Fatal(err)
			}
			var jerberinos, jerberoos []*DataHistoryJob
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				jerberinos = append(jerberinos, &DataHistoryJob{
					ID:           uu.String(),
					Nickname:     fmt.Sprintf("TestDataHistoryJob%v", i),
					ExchangeID:   testExchanges[0].UUID.String(),
					ExchangeName: testExchanges[0].Name,
					Asset:        asset.Spot.String(),
					Base:         currency.BTC.String(),
					Quote:        currency.USD.String(),
					StartDate:    time.Now().Add(time.Duration(i) * time.Second),
					EndDate:      time.Now().Add(time.Minute * time.Duration(i)),
					Interval:     int64(i),
				})
			}
			err = db.Upsert(jerberinos...)
			if err != nil {
				t.Fatal(err)
			}
			// insert the same jerbs to test conflict resolution
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				j := &DataHistoryJob{
					ID:           uu.String(),
					Nickname:     fmt.Sprintf("TestDataHistoryJob%v", i),
					ExchangeID:   testExchanges[0].UUID.String(),
					ExchangeName: testExchanges[0].Name,
					Asset:        asset.Spot.String(),
					Base:         currency.BTC.String(),
					Quote:        currency.USD.String(),
					StartDate:    time.Now().Add(time.Duration(i) * time.Second),
					EndDate:      time.Now().Add(time.Minute * time.Duration(i)),
					Interval:     int64(i),
				}
				if i == 19 {
					// One completed job so the incomplete query returns 19.
					j.Status = 1
				}
				jerberoos = append(jerberoos, j)
			}
			err = db.Upsert(jerberoos...)
			if err != nil {
				t.Fatal(err)
			}
			_, err = db.GetJobsBetween(time.Now(), time.Now().Add(time.Hour))
			if err != nil {
				t.Fatal(err)
			}
			resp, err := db.GetByNickName("TestDataHistoryJob19")
			if err != nil {
				t.Fatal(err)
			}
			if !strings.EqualFold(resp.Nickname, "TestDataHistoryJob19") {
				t.Fatal("the database no longer functions")
			}
			results, err := db.GetAllIncompleteJobsAndResults()
			if err != nil {
				t.Error(err)
			}
			if len(results) != 19 {
				t.Errorf("expected 19, received %v", len(results))
			}
			// NOTE(review): this calls the postgres implementation directly
			// even when running against SQLite; it appears to rely on
			// SQLite accepting postgres-style placeholders — confirm, or
			// route through GetJobAndAllResults.
			jerb, err := db.getJobAndAllResultsPostgres(jerberoos[0].Nickname)
			if err != nil {
				t.Fatal(err)
			}
			if !strings.EqualFold(jerb.Nickname, jerberoos[0].Nickname) {
				// Arguments were previously reversed (received printed as expected).
				t.Errorf("expected %v, received %v", jerberoos[0].Nickname, jerb.Nickname)
			}
			results, err = db.GetJobsBetween(time.Now().Add(-time.Hour), time.Now())
			if err != nil {
				t.Error(err)
			}
			if len(results) != 20 {
				t.Errorf("expected 20, received %v", len(results))
			}
			jerb, err = db.GetJobAndAllResults(jerberoos[0].Nickname)
			if err != nil {
				t.Error(err)
			}
			if !strings.EqualFold(jerb.Nickname, jerberoos[0].Nickname) {
				// Arguments were previously reversed (received printed as expected).
				t.Errorf("expected %v, received %v", jerberoos[0].Nickname, jerb.Nickname)
			}
			err = testhelpers.CloseDatabase(dbConn)
			if err != nil {
				t.Error(err)
			}
		})
	}
}

View File

@@ -0,0 +1,47 @@
package datahistoryjob
import (
"time"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjobresult"
)
// DataHistoryJob is a DTO for database data
type DataHistoryJob struct {
	ID               string // UUID string primary key
	Nickname         string // human-readable identifier used for lookups (see GetByNickName)
	ExchangeID       string // UUID of the related exchange row
	ExchangeName     string
	Asset            string // asset type as a string, e.g. asset.Spot.String()
	Base             string // base currency code, e.g. "BTC"
	Quote            string // quote currency code, e.g. "USD"
	StartDate        time.Time
	EndDate          time.Time
	Interval         int64 // candle interval; stored as int64 — presumably a kline interval in nanoseconds, verify against caller
	RequestSizeLimit int64
	DataType         int64
	MaxRetryAttempts int64
	BatchSize        int64
	Status           int64 // job status code; semantics defined by the engine's data history manager
	CreatedDate      time.Time
	Results          []*datahistoryjobresult.DataHistoryJobResult // associated per-interval results, when loaded
}
// DBService is a service which allows the interaction with
// the database without a direct reference to a global
type DBService struct {
	sql    database.ISQL // scoped SQL connection used for all queries
	driver string        // driver name used to choose dialect-specific implementations
}
// IDBService allows using data history job database service
// without needing to care about implementation
type IDBService interface {
	// Upsert inserts or updates jobs in the database
	Upsert(jobs ...*DataHistoryJob) error
	// GetByNickName returns a job by its unique nickname
	GetByNickName(nickname string) (*DataHistoryJob, error)
	// GetByID returns a job by its UUID string
	GetByID(id string) (*DataHistoryJob, error)
	// GetJobsBetween returns all jobs created within the date range
	GetJobsBetween(startDate, endDate time.Time) ([]DataHistoryJob, error)
	// GetAllIncompleteJobsAndResults returns unfinished jobs with their results
	GetAllIncompleteJobsAndResults() ([]DataHistoryJob, error)
	// GetJobAndAllResults returns a job by nickname with all results attached
	GetJobAndAllResults(nickname string) (*DataHistoryJob, error)
}

View File

@@ -0,0 +1,280 @@
package datahistoryjobresult
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/models/postgres"
"github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/log"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries/qm"
"github.com/volatiletech/null"
)
// Setup returns a DBService for interacting with data history job results.
// The supplied database must be non-nil and connected; an error is returned
// otherwise so callers cannot silently hold a nil *DBService and panic on
// first use (previously nil, nil was returned in both failure cases).
func Setup(db database.IDatabase) (*DBService, error) {
	if db == nil {
		return nil, database.ErrNoDatabaseProvided
	}
	if !db.IsConnected() {
		return nil, database.ErrDatabaseNotConnected
	}
	cfg := db.GetConfig()
	dbCon, err := db.GetSQL()
	if err != nil {
		return nil, err
	}
	return &DBService{
		sql:    dbCon,
		driver: cfg.Driver,
	}, nil
}
// Upsert inserts or updates job results in the database within a single
// transaction. On any failure the transaction is rolled back via the
// deferred handler.
func (db *DBService) Upsert(jobs ...*DataHistoryJobResult) error {
	if len(jobs) == 0 {
		return nil
	}
	ctx := context.Background()
	tx, err := db.sql.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginTx %w", err)
	}
	defer func() {
		if err != nil {
			errRB := tx.Rollback()
			if errRB != nil {
				log.Errorf(log.DatabaseMgr, "Insert tx.Rollback %v", errRB)
			}
		}
	}()
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite:
		err = upsertSqlite(ctx, tx, jobs...)
	case database.DBPostgreSQL:
		err = upsertPostgres(ctx, tx, jobs...)
	default:
		// assign to err rather than returning directly so the deferred
		// rollback fires; a bare return previously leaked the open transaction
		err = database.ErrNoDatabaseProvided
	}
	if err != nil {
		return err
	}
	return tx.Commit()
}
// GetByJobID returns all results associated with the supplied job ID,
// dispatching to the appropriate dialect implementation.
func (db *DBService) GetByJobID(jobID string) ([]DataHistoryJobResult, error) {
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite:
		return db.getByJobIDSQLite(jobID)
	case database.DBPostgreSQL:
		return db.getByJobIDPostgres(jobID)
	}
	return nil, database.ErrNoDatabaseProvided
}
// GetJobResultsBetween will return all job results for a job ID whose run
// time falls between the two supplied dates.
func (db *DBService) GetJobResultsBetween(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error) {
	switch db.driver {
	case database.DBSQLite3, database.DBSQLite:
		return db.getJobResultsBetweenSQLite(jobID, startDate, endDate)
	case database.DBPostgreSQL:
		return db.getJobResultsBetweenPostgres(jobID, startDate, endDate)
	}
	return nil, database.ErrNoDatabaseProvided
}
// upsertSqlite writes job results to SQLite within the supplied transaction,
// generating a UUID for any result that lacks one. Times are stored as
// RFC3339 strings per the SQLite schema.
func upsertSqlite(ctx context.Context, tx *sql.Tx, results ...*DataHistoryJobResult) error {
	for i := range results {
		if results[i].ID == "" {
			newID, uuidErr := uuid.NewV4()
			if uuidErr != nil {
				return uuidErr
			}
			results[i].ID = newID.String()
		}
		record := sqlite3.Datahistoryjobresult{
			ID:                results[i].ID,
			JobID:             results[i].JobID,
			Result:            null.NewString(results[i].Result, results[i].Result != ""),
			Status:            float64(results[i].Status),
			IntervalStartTime: results[i].IntervalStartDate.UTC().Format(time.RFC3339),
			IntervalEndTime:   results[i].IntervalEndDate.UTC().Format(time.RFC3339),
			RunTime:           results[i].Date.UTC().Format(time.RFC3339),
		}
		if insertErr := record.Insert(ctx, tx, boil.Infer()); insertErr != nil {
			return insertErr
		}
	}
	return nil
}
// upsertPostgres upserts job results into PostgreSQL within the supplied
// transaction, generating a UUID for any result that lacks one. Times are
// stored natively as timestamps.
func upsertPostgres(ctx context.Context, tx *sql.Tx, results ...*DataHistoryJobResult) error {
	for i := range results {
		if results[i].ID == "" {
			newID, uuidErr := uuid.NewV4()
			if uuidErr != nil {
				return uuidErr
			}
			results[i].ID = newID.String()
		}
		record := postgres.Datahistoryjobresult{
			ID:                results[i].ID,
			JobID:             results[i].JobID,
			Result:            null.NewString(results[i].Result, results[i].Result != ""),
			Status:            float64(results[i].Status),
			IntervalStartTime: results[i].IntervalStartDate.UTC(),
			IntervalEndTime:   results[i].IntervalEndDate.UTC(),
			RunTime:           results[i].Date.UTC(),
		}
		if upsertErr := record.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); upsertErr != nil {
			return upsertErr
		}
	}
	return nil
}
// getByJobIDSQLite fetches all results for a job ID from SQLite, parsing the
// RFC3339 time strings back into time.Time values.
func (db *DBService) getByJobIDSQLite(jobID string) ([]DataHistoryJobResult, error) {
	rows, err := sqlite3.Datahistoryjobresults(qm.Where("job_id = ?", jobID)).All(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	var response []DataHistoryJobResult
	for i := range rows {
		start, parseErr := time.Parse(time.RFC3339, rows[i].IntervalStartTime)
		if parseErr != nil {
			return nil, parseErr
		}
		end, parseErr := time.Parse(time.RFC3339, rows[i].IntervalEndTime)
		if parseErr != nil {
			return nil, parseErr
		}
		run, parseErr := time.Parse(time.RFC3339, rows[i].RunTime)
		if parseErr != nil {
			return nil, parseErr
		}
		response = append(response, DataHistoryJobResult{
			ID:                rows[i].ID,
			JobID:             rows[i].JobID,
			IntervalStartDate: start,
			IntervalEndDate:   end,
			Status:            int64(rows[i].Status),
			Result:            rows[i].Result.String,
			Date:              run,
		})
	}
	return response, nil
}
// getByJobIDPostgres fetches all results for a job ID from PostgreSQL;
// timestamps come back natively so no parsing is required.
func (db *DBService) getByJobIDPostgres(jobID string) ([]DataHistoryJobResult, error) {
	rows, err := postgres.Datahistoryjobresults(qm.Where("job_id = ?", jobID)).All(context.Background(), db.sql)
	if err != nil {
		return nil, err
	}
	var response []DataHistoryJobResult
	for i := range rows {
		response = append(response, DataHistoryJobResult{
			ID:                rows[i].ID,
			JobID:             rows[i].JobID,
			IntervalStartDate: rows[i].IntervalStartTime,
			IntervalEndDate:   rows[i].IntervalEndTime,
			Status:            int64(rows[i].Status),
			Result:            rows[i].Result.String,
			Date:              rows[i].RunTime,
		})
	}
	return response, nil
}
// getJobResultsBetweenSQLite returns all results for a job whose run_time
// lies between the two dates, parsing stored RFC3339 strings into time.Time.
func (db *DBService) getJobResultsBetweenSQLite(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error) {
	rows, err := sqlite3.Datahistoryjobresults(
		qm.Where("job_id = ? AND run_time BETWEEN ? AND ? ",
			jobID,
			startDate.UTC().Format(time.RFC3339),
			endDate.UTC().Format(time.RFC3339)),
	).All(context.Background(), db.sql)
	var response []DataHistoryJobResult
	if err != nil {
		return response, err
	}
	for i := range rows {
		start, parseErr := time.Parse(time.RFC3339, rows[i].IntervalStartTime)
		if parseErr != nil {
			return nil, parseErr
		}
		end, parseErr := time.Parse(time.RFC3339, rows[i].IntervalEndTime)
		if parseErr != nil {
			return nil, parseErr
		}
		run, parseErr := time.Parse(time.RFC3339, rows[i].RunTime)
		if parseErr != nil {
			return nil, parseErr
		}
		response = append(response, DataHistoryJobResult{
			ID:                rows[i].ID,
			JobID:             rows[i].JobID,
			IntervalStartDate: start,
			IntervalEndDate:   end,
			Status:            int64(rows[i].Status),
			Result:            rows[i].Result.String,
			Date:              run,
		})
	}
	return response, nil
}
// getJobResultsBetweenPostgres returns all results for a job whose run_time
// lies between the two dates using native timestamp comparison.
func (db *DBService) getJobResultsBetweenPostgres(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error) {
	var response []DataHistoryJobResult
	rows, err := postgres.Datahistoryjobresults(
		qm.Where("job_id = ? AND run_time BETWEEN ? AND ? ", jobID, startDate, endDate),
	).All(context.Background(), db.sql)
	if err != nil {
		return response, err
	}
	for i := range rows {
		response = append(response, DataHistoryJobResult{
			ID:                rows[i].ID,
			JobID:             rows[i].JobID,
			IntervalStartDate: rows[i].IntervalStartTime,
			IntervalEndDate:   rows[i].IntervalEndTime,
			Status:            int64(rows[i].Status),
			Result:            rows[i].Result.String,
			Date:              rows[i].RunTime,
		})
	}
	return response, nil
}

View File

@@ -0,0 +1,200 @@
package datahistoryjobresult
import (
"database/sql"
"fmt"
"io/ioutil"
"log"
"os"
"testing"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/drivers"
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/database/testhelpers"
)
var (
	// verbose toggles additional test output via testhelpers
	verbose = false
	// testExchanges are seeded into the exchange table so job results can
	// reference real exchange rows
	testExchanges = []exchange.Details{
		{
			Name: "one",
		},
		{
			Name: "two",
		},
	}
)
// TestMain prepares database test helpers (connection details and a temp
// directory), runs the suite, then removes the temp directory.
func TestMain(m *testing.M) {
	if verbose {
		testhelpers.EnableVerboseTestOutput()
	}
	testhelpers.PostgresTestDatabase = testhelpers.GetConnectionDetails()
	tempDir, err := ioutil.TempDir("", "gct-temp")
	if err != nil {
		log.Fatal(err)
	}
	testhelpers.TempDir = tempDir
	exitCode := m.Run()
	if err = os.RemoveAll(testhelpers.TempDir); err != nil {
		fmt.Printf("Failed to remove temp db file: %v", err)
	}
	os.Exit(exitCode)
}
// seedDB inserts the package's test exchanges and records the UUID that the
// database generated for each, so later inserts can reference them.
func seedDB() error {
	if err := exchange.InsertMany(testExchanges); err != nil {
		return err
	}
	for i := range testExchanges {
		details, err := exchange.One(testExchanges[i].Name)
		if err != nil {
			return err
		}
		testExchanges[i].UUID = details.UUID
	}
	return nil
}
// TestDataHistoryJob exercises the job result DBService against each
// configured database driver: initial upsert, a second upsert to exercise
// conflict resolution, then retrieval by job ID and by run-time range.
func TestDataHistoryJob(t *testing.T) {
	testCases := []struct {
		name   string
		config *database.Config
		seedDB func() error
		runner func(t *testing.T)
		closer func(dbConn *database.Instance) error
	}{
		{
			name:   "postgresql",
			config: testhelpers.PostgresTestDatabase,
			seedDB: seedDB,
		},
		{
			name: "SQLite",
			config: &database.Config{
				Driver:            database.DBSQLite3,
				ConnectionDetails: drivers.ConnectionDetails{Database: "./testdb"},
			},
			seedDB: seedDB,
		},
	}
	for x := range testCases {
		test := testCases[x]
		t.Run(test.name, func(t *testing.T) {
			// skip (rather than fail) when the driver isn't configured locally
			if !testhelpers.CheckValidConfig(&test.config.ConnectionDetails) {
				t.Skip("database not configured skipping test")
			}
			dbConn, err := testhelpers.ConnectToDatabase(test.config)
			if err != nil {
				t.Fatal(err)
			}
			if test.seedDB != nil {
				err = test.seedDB()
				if err != nil {
					t.Error(err)
				}
			}
			db, err := Setup(dbConn)
			if err != nil {
				t.Fatal(err)
			}
			// postgres requires job for tests to function
			// (foreign key on job_id; SQLite runs with an empty id here)
			var id string
			if test.name == "postgresql" {
				var selectID *sql.Rows
				selectID, err = db.sql.Query("select id from datahistoryjob where nickname = 'testdatahistoryjob1'")
				if err != nil {
					t.Fatal(err)
				}
				defer func() {
					// close the row set and surface any deferred iteration error
					err = selectID.Close()
					if err != nil {
						t.Fatal(err)
					}
					if selectID.Err() != nil {
						t.Fatal(selectID.Err())
					}
				}()
				selectID.Next()
				err = selectID.Scan(&id)
				if err != nil {
					t.Error(err)
				}
			}
			var resulterinos, resultaroos []*DataHistoryJobResult
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				resulterinos = append(resulterinos, &DataHistoryJobResult{
					ID:                uu.String(),
					JobID:             id,
					IntervalStartDate: time.Now(),
					IntervalEndDate:   time.Now().Add(time.Second),
					Status:            0,
					Result:            "Yay",
					Date:              time.Now(),
				})
			}
			err = db.Upsert(resulterinos...)
			if err != nil {
				t.Fatal(err)
			}
			// insert the same results to test conflict resolution
			for i := 0; i < 20; i++ {
				uu, _ := uuid.NewV4()
				j := &DataHistoryJobResult{
					ID:                uu.String(),
					JobID:             id,
					IntervalStartDate: time.Now(),
					IntervalEndDate:   time.Now().Add(time.Second),
					Status:            0,
					Result:            "Wow",
					Date:              time.Now(),
				}
				if i == 19 {
					// one distinguishable result, placed a day ahead so the
					// range query below can target it specifically
					j.Status = 1
					j.Date = time.Now().Add(time.Hour * 24)
				}
				resultaroos = append(resultaroos, j)
			}
			err = db.Upsert(resultaroos...)
			if err != nil {
				t.Fatal(err)
			}
			results, err := db.GetByJobID(id)
			if err != nil {
				t.Fatal(err)
			}
			if len(results) == 0 {
				t.Error("expected job results")
			}
			// only the i == 19 result above falls inside this window
			results, err = db.GetJobResultsBetween(id, time.Now().Add(time.Hour*23), time.Now().Add(time.Hour*25))
			if err != nil {
				t.Fatal(err)
			}
			if len(results) == 0 {
				t.Errorf("expected job result, received %v", len(results))
			}
			err = testhelpers.CloseDatabase(dbConn)
			if err != nil {
				t.Error(err)
			}
		})
	}
}

View File

@@ -0,0 +1,33 @@
package datahistoryjobresult
import (
"time"
"github.com/thrasher-corp/gocryptotrader/database"
)
// DataHistoryJobResult is a DTO for database data
type DataHistoryJobResult struct {
	ID                string // UUID string primary key
	JobID             string // UUID of the parent data history job
	IntervalStartDate time.Time
	IntervalEndDate   time.Time
	Status            int64  // result status code; semantics defined by the data history manager
	Result            string // free-form outcome message; stored as nullable text
	Date              time.Time // run time of the job attempt
}
// DBService is a service which allows the interaction with
// the database without a direct reference to a global
type DBService struct {
	sql    database.ISQL // scoped SQL connection used for all queries
	driver string        // driver name used to choose dialect-specific implementations
}
// IDBService allows using data history job result database service
// without needing to care about implementation
type IDBService interface {
	// Upsert inserts or updates job results in the database
	Upsert(jobs ...*DataHistoryJobResult) error
	// GetByJobID returns all results for the given job ID
	GetByJobID(jobID string) ([]DataHistoryJobResult, error)
	// GetJobResultsBetween returns results whose run time is within the range
	GetJobResultsBetween(jobID string, startDate, endDate time.Time) ([]DataHistoryJobResult, error)
}

View File

@@ -8,7 +8,7 @@ import (
)
var (
exchangeCache = cache.New(10)
exchangeCache = cache.New(30)
// ErrNoExchangeFound is a basic predefined error
ErrNoExchangeFound = errors.New("exchange not found")
)

View File

@@ -32,9 +32,10 @@ func TestGetSQLDialect(t *testing.T) {
test := testCases[x]
t.Run(test.driver, func(t *testing.T) {
err := database.DB.SetConfig(&database.Config{
cfg := &database.Config{
Driver: test.driver,
})
}
err := database.DB.SetConfig(cfg)
if err != nil {
t.Error(err)
}

View File

@@ -10,10 +10,11 @@ import (
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/database"
modelPSQL "github.com/thrasher-corp/gocryptotrader/database/models/postgres"
modelSQLite "github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/models/postgres"
"github.com/thrasher-corp/gocryptotrader/database/models/sqlite3"
"github.com/thrasher-corp/gocryptotrader/database/repository"
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/exchanges/kline"
"github.com/thrasher-corp/gocryptotrader/log"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries/qm"
@@ -61,6 +62,89 @@ func Insert(trades ...Data) error {
return tx.Commit()
}
// VerifyTradeInIntervals will query for ONE trade within each kline interval and verify if data exists
// if it does, it will set the range holder property "HasData" to true
func VerifyTradeInIntervals(exchangeName, assetType, base, quote string, irh *kline.IntervalRangeHolder) error {
	ctx := boil.SkipTimestamps(context.Background())
	tx, err := database.DB.SQL.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("beginTx %w", err)
	}
	defer func() {
		if err == nil {
			return
		}
		if errRB := tx.Rollback(); errRB != nil {
			log.Errorf(log.DatabaseMgr, "Insert tx.Rollback %v", errRB)
		}
	}()
	switch repository.GetSQLDialect() {
	case database.DBSQLite3, database.DBSQLite:
		err = verifyTradeInIntervalsSqlite(ctx, tx, exchangeName, assetType, base, quote, irh)
	default:
		err = verifyTradeInIntervalsPostgres(ctx, tx, exchangeName, assetType, base, quote, irh)
	}
	if err != nil {
		return err
	}
	return tx.Commit()
}
// verifyTradeInIntervalsSqlite checks each interval in the range holder for
// at least one matching trade and marks HasData accordingly. An interval with
// no trades is a valid outcome: sqlboiler's One returns sql.ErrNoRows in that
// case, which previously aborted the whole verification with an error.
func verifyTradeInIntervalsSqlite(ctx context.Context, tx *sql.Tx, exchangeName, assetType, base, quote string, irh *kline.IntervalRangeHolder) error {
	exch, err := sqlite3.Exchanges(qm.Where("name = ?", exchangeName)).One(ctx, tx)
	if err != nil {
		return err
	}
	for i := range irh.Ranges {
		for j := range irh.Ranges[i].Intervals {
			result, err := sqlite3.Trades(qm.Where("exchange_name_id = ? AND asset = ? AND base = ? AND quote = ? AND timestamp between ? AND ?",
				exch.ID,
				assetType,
				base,
				quote,
				irh.Ranges[i].Intervals[j].Start.Time.UTC().Format(time.RFC3339),
				irh.Ranges[i].Intervals[j].End.Time.UTC().Format(time.RFC3339))).One(ctx, tx)
			if err != nil {
				if err == sql.ErrNoRows {
					// no trade in this interval; leave HasData false
					continue
				}
				return err
			}
			if result != nil {
				irh.Ranges[i].Intervals[j].HasData = true
			}
		}
	}
	return nil
}
// verifyTradeInIntervalsPostgres checks each interval in the range holder for
// at least one matching trade and marks HasData accordingly.
// Fixes: the WHERE clause was missing the AND between "quote = ?" and
// "timestamp between", producing invalid SQL (compare the sqlite twin), and
// sql.ErrNoRows from One for an empty interval is now treated as "no data"
// instead of aborting verification.
func verifyTradeInIntervalsPostgres(ctx context.Context, tx *sql.Tx, exchangeName, assetType, base, quote string, irh *kline.IntervalRangeHolder) error {
	exch, err := postgres.Exchanges(qm.Where("name = ?", exchangeName)).One(ctx, tx)
	if err != nil {
		return err
	}
	for i := range irh.Ranges {
		for j := range irh.Ranges[i].Intervals {
			result, err := postgres.Trades(qm.Where("exchange_name_id = ? AND asset = ? AND base = ? AND quote = ? AND timestamp between ? AND ?",
				exch.ID,
				assetType,
				base,
				quote,
				irh.Ranges[i].Intervals[j].Start.Time.UTC().Format(time.RFC3339),
				irh.Ranges[i].Intervals[j].End.Time.UTC().Format(time.RFC3339))).One(ctx, tx)
			if err != nil {
				if err == sql.ErrNoRows {
					// no trade in this interval; leave HasData false
					continue
				}
				return err
			}
			if result != nil {
				irh.Ranges[i].Intervals[j].HasData = true
			}
		}
	}
	return nil
}
func insertSQLite(ctx context.Context, tx *sql.Tx, trades ...Data) error {
for i := range trades {
if trades[i].ID == "" {
@@ -70,7 +154,7 @@ func insertSQLite(ctx context.Context, tx *sql.Tx, trades ...Data) error {
}
trades[i].ID = freshUUID.String()
}
var tempEvent = modelSQLite.Trade{
var tempEvent = sqlite3.Trade{
ID: trades[i].ID,
ExchangeNameID: trades[i].ExchangeNameID,
Base: strings.ToUpper(trades[i].Base),
@@ -106,7 +190,7 @@ func insertPostgres(ctx context.Context, tx *sql.Tx, trades ...Data) error {
}
trades[i].ID = freshUUID.String()
}
var tempEvent = modelPSQL.Trade{
var tempEvent = postgres.Trade{
ExchangeNameID: trades[i].ExchangeNameID,
Base: strings.ToUpper(trades[i].Base),
Quote: strings.ToUpper(trades[i].Quote),
@@ -152,7 +236,7 @@ func GetByUUID(uuid string) (td Data, err error) {
func getByUUIDSQLite(uuid string) (Data, error) {
var td Data
var ts time.Time
query := modelSQLite.Trades(qm.Where("id = ?", uuid))
query := sqlite3.Trades(qm.Where("id = ?", uuid))
result, err := query.One(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -179,8 +263,8 @@ func getByUUIDSQLite(uuid string) (Data, error) {
}
func getByUUIDPostgres(uuid string) (td Data, err error) {
query := modelPSQL.Trades(qm.Where("id = ?", uuid))
var result *modelPSQL.Trade
query := postgres.Trades(qm.Where("id = ?", uuid))
var result *postgres.Trade
result, err = query.One(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -232,8 +316,8 @@ func getInRangeSQLite(exchangeName, assetType, base, quote string, startDate, en
"quote": strings.ToUpper(quote),
}
q := generateQuery(wheres, startDate, endDate)
query := modelSQLite.Trades(q...)
var result []*modelSQLite.Trade
query := sqlite3.Trades(q...)
var result []*sqlite3.Trade
result, err = query.All(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -274,8 +358,8 @@ func getInRangePostgres(exchangeName, assetType, base, quote string, startDate,
"quote": strings.ToUpper(quote),
}
q := generateQuery(wheres, startDate, endDate)
query := modelPSQL.Trades(q...)
var result []*modelPSQL.Trade
query := postgres.Trades(q...)
var result []*postgres.Trade
result, err = query.All(context.Background(), database.DB.SQL)
if err != nil {
return td, err
@@ -333,7 +417,7 @@ func deleteTradesSQLite(ctx context.Context, tx *sql.Tx, trades ...Data) error {
for i := range trades {
tradeIDs = append(tradeIDs, trades[i].ID)
}
query := modelSQLite.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
query := sqlite3.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
_, err := query.DeleteAll(ctx, tx)
return err
}
@@ -343,7 +427,7 @@ func deleteTradesPostgres(ctx context.Context, tx *sql.Tx, trades ...Data) error
for i := range trades {
tradeIDs = append(tradeIDs, trades[i].ID)
}
query := modelPSQL.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
query := postgres.Trades(qm.WhereIn(`id in ?`, tradeIDs...))
_, err := query.DeleteAll(ctx, tx)
return err
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/thrasher-corp/gocryptotrader/database/repository/exchange"
"github.com/thrasher-corp/gocryptotrader/database/testhelpers"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/kline"
"github.com/thrasher-corp/gocryptotrader/exchanges/order"
)
@@ -97,12 +98,12 @@ func TestTrades(t *testing.T) {
func tradeSQLTester(t *testing.T) {
var trades, trades2 []Data
firstTime := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
for i := 0; i < 20; i++ {
uu, _ := uuid.NewV4()
trades = append(trades, Data{
ID: uu.String(),
Timestamp: time.Now(),
Timestamp: firstTime.Add(time.Minute * time.Duration(i)),
Exchange: testExchanges[0].Name,
Base: currency.BTC.String(),
Quote: currency.USD.String(),
@@ -122,7 +123,7 @@ func tradeSQLTester(t *testing.T) {
uu, _ := uuid.NewV4()
trades2 = append(trades2, Data{
ID: uu.String(),
Timestamp: time.Now(),
Timestamp: firstTime.Add(time.Minute * time.Duration(i)),
Exchange: testExchanges[0].Name,
Base: currency.BTC.String(),
Quote: currency.USD.String(),
@@ -142,8 +143,8 @@ func tradeSQLTester(t *testing.T) {
asset.Spot.String(),
currency.BTC.String(),
currency.USD.String(),
time.Now().Add(-time.Hour),
time.Now().Add(time.Hour),
firstTime.Add(-time.Hour),
firstTime.Add(time.Hour),
)
if err != nil {
t.Error(err)
@@ -157,8 +158,8 @@ func tradeSQLTester(t *testing.T) {
asset.Spot.String(),
currency.BTC.String(),
currency.USD.String(),
time.Now().Add(-time.Hour),
time.Now().Add(time.Hour))
firstTime.Add(-time.Hour),
firstTime.Add(time.Hour))
if err != nil {
t.Error(err)
}
@@ -166,6 +167,24 @@ func tradeSQLTester(t *testing.T) {
t.Error("Bad get!")
}
ranges, err := kline.CalculateCandleDateRanges(firstTime, firstTime.Add(20*time.Minute), kline.OneMin, 100)
if err != nil {
t.Error(err)
}
err = VerifyTradeInIntervals(testExchanges[0].Name,
asset.Spot.String(),
currency.BTC.String(),
currency.USD.String(),
ranges)
if err != nil {
t.Error(err)
}
if !ranges.HasDataAtDate(firstTime) {
t.Error("expected data")
}
err = DeleteTrades(trades...)
if err != nil {
t.Error(err)

View File

@@ -80,13 +80,13 @@ func ConnectToDatabase(conn *database.Config) (dbConn *database.Instance, err er
return nil, err
}
if conn.Driver == database.DBPostgreSQL {
dbConn, err = psqlConn.Connect()
dbConn, err = psqlConn.Connect(conn)
if err != nil {
return nil, err
}
} else if conn.Driver == database.DBSQLite3 || conn.Driver == database.DBSQLite {
database.DB.DataPath = TempDir
dbConn, err = sqliteConn.Connect()
dbConn, err = sqliteConn.Connect(conn.Database)
if err != nil {
return nil, err
}
@@ -96,7 +96,7 @@ func ConnectToDatabase(conn *database.Config) (dbConn *database.Instance, err er
if err != nil {
return nil, err
}
database.DB.SetConnected(true)
return
}

View File

@@ -1,7 +1,6 @@
package engine
import (
"errors"
"fmt"
"sync/atomic"
@@ -21,8 +20,6 @@ type CommunicationManager struct {
comms *communications.Communications
}
var errNilConfig = errors.New("received nil communications config")
// SetupCommunicationManager creates a communications manager
func SetupCommunicationManager(cfg *base.CommunicationsConfig) (*CommunicationManager, error) {
if cfg == nil {

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Communication_manager
# GoCryptoTrader package Communication manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Communication_manager
## Current Features for Communication manager
+ The communication manager subsystem is used to push events raised in GoCryptoTrader to any enabled communication system such as a Slack server
+ In order to modify the behaviour of the communication manager subsystem, you can edit the following inside your config file under `communications`:

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Connection_manager
# GoCryptoTrader package Connection manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Connection_manager
## Current Features for Connection manager
+ The connection manager subsystem is used to periodically check whether the application is connected to the internet and will provide alerts of any changes
+ In order to modify the behaviour of the connection manager subsystem, you can edit the following inside your config file under `connectionMonitor`:

View File

@@ -16,9 +16,7 @@ import (
// DatabaseConnectionManagerName is an exported subsystem name
const DatabaseConnectionManagerName = "database"
var (
errDatabaseDisabled = errors.New("database support disabled")
)
var errDatabaseDisabled = errors.New("database support disabled")
// DatabaseConnectionManager holds the database connection and its status
type DatabaseConnectionManager struct {
@@ -43,6 +41,15 @@ func (m *DatabaseConnectionManager) IsRunning() bool {
return atomic.LoadInt32(&m.started) == 1
}
// GetInstance returns a limited scoped database instance
func (m *DatabaseConnectionManager) GetInstance() database.IDatabase {
	if m != nil && atomic.LoadInt32(&m.started) != 0 {
		return m.dbConn
	}
	// nil receiver or manager not started: no instance available
	return nil
}
// SetupDatabaseConnectionManager creates a new database manager
func SetupDatabaseConnectionManager(cfg *database.Config) (*DatabaseConnectionManager, error) {
if cfg == nil {
@@ -67,6 +74,14 @@ func SetupDatabaseConnectionManager(cfg *database.Config) (*DatabaseConnectionMa
return m, nil
}
// IsConnected is an exported check to verify if the database is connected
func (m *DatabaseConnectionManager) IsConnected() bool {
	if m != nil && atomic.LoadInt32(&m.started) != 0 {
		return m.dbConn.IsConnected()
	}
	// nil receiver or manager not started cannot be connected
	return false
}
// Start sets up the database connection manager to maintain a SQL connection
func (m *DatabaseConnectionManager) Start(wg *sync.WaitGroup) (err error) {
if m == nil {
@@ -92,14 +107,14 @@ func (m *DatabaseConnectionManager) Start(wg *sync.WaitGroup) (err error) {
m.host,
m.database,
m.driver)
m.dbConn, err = dbpsql.Connect()
m.dbConn, err = dbpsql.Connect(m.dbConn.GetConfig())
case database.DBSQLite,
database.DBSQLite3:
log.Debugf(log.DatabaseMgr,
"Attempting to establish database connection to %s utilising %s driver\n",
m.database,
m.driver)
m.dbConn, err = dbsqlite3.Connect()
m.dbConn, err = dbsqlite3.Connect(m.database)
default:
return database.ErrNoDatabaseProvided
}

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Database_connection
# GoCryptoTrader package Database connection
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Database_connection
## Current Features for Database connection
+ The database connection manager subsystem is used to periodically check whether the application is connected to the database and will provide alerts of any changes
+ In order to modify the behaviour of the database connection manager subsystem, you can edit the following inside your config file under `database`:

View File

@@ -23,7 +23,7 @@ func CreateDatabase(t *testing.T) string {
return tmpDir
}
func Cleanup(t *testing.T, tmpDir string) {
func Cleanup(tmpDir string) {
if database.DB.IsConnected() {
err := database.DB.CloseConnection()
if err != nil {
@@ -53,7 +53,7 @@ func TestSetupDatabaseConnectionManager(t *testing.T) {
func TestStartSQLite(t *testing.T) {
tmpDir := CreateDatabase(t)
defer Cleanup(t, tmpDir)
defer Cleanup(tmpDir)
m, err := SetupDatabaseConnectionManager(&database.Config{})
if !errors.Is(err, nil) {
t.Errorf("error '%v', expected '%v'", err, nil)
@@ -92,7 +92,7 @@ func TestStartSQLite(t *testing.T) {
// This test does not care for a successful connection
func TestStartPostgres(t *testing.T) {
tmpDir := CreateDatabase(t)
defer Cleanup(t, tmpDir)
defer Cleanup(tmpDir)
m, err := SetupDatabaseConnectionManager(&database.Config{})
if !errors.Is(err, nil) {
t.Errorf("error '%v', expected '%v'", err, nil)
@@ -116,7 +116,7 @@ func TestStartPostgres(t *testing.T) {
func TestDatabaseConnectionManagerIsRunning(t *testing.T) {
tmpDir := CreateDatabase(t)
defer Cleanup(t, tmpDir)
defer Cleanup(tmpDir)
m, err := SetupDatabaseConnectionManager(&database.Config{
Enabled: true,
Driver: database.DBSQLite,
@@ -147,7 +147,7 @@ func TestDatabaseConnectionManagerIsRunning(t *testing.T) {
func TestDatabaseConnectionManagerStop(t *testing.T) {
tmpDir := CreateDatabase(t)
defer Cleanup(t, tmpDir)
defer Cleanup(tmpDir)
m, err := SetupDatabaseConnectionManager(&database.Config{
Enabled: true,
Driver: database.DBSQLite,
@@ -184,7 +184,7 @@ func TestDatabaseConnectionManagerStop(t *testing.T) {
func TestCheckConnection(t *testing.T) {
tmpDir := CreateDatabase(t)
defer Cleanup(t, tmpDir)
defer Cleanup(tmpDir)
var m *DatabaseConnectionManager
err := m.checkConnection()
if !errors.Is(err, ErrNilSubsystem) {
@@ -235,7 +235,42 @@ func TestCheckConnection(t *testing.T) {
m.dbConn.SetConnected(false)
err = m.checkConnection()
if !errors.Is(err, database.ErrDatabaseNotConnected) {
t.Errorf("error '%v', expected '%v'", err, database.ErrDatabaseNotConnected)
}
}
// TestGetInstance verifies GetInstance returns nil before Start, a non-nil
// instance after Start, and nil again on a nil receiver.
func TestGetInstance(t *testing.T) {
	tmpDir := CreateDatabase(t)
	defer Cleanup(tmpDir)
	m, err := SetupDatabaseConnectionManager(&database.Config{
		Enabled: true,
		Driver:  database.DBSQLite,
		ConnectionDetails: drivers.ConnectionDetails{
			Host:     "localhost",
			Database: "test.db",
		},
	})
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	// not yet started: no instance should be exposed
	db := m.GetInstance()
	if db != nil {
		t.Error("expected nil")
	}
	var wg sync.WaitGroup
	err = m.Start(&wg)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	db = m.GetInstance()
	if db == nil {
		t.Error("expected not nil")
	}
	// nil receiver must be handled gracefully
	m = nil
	db = m.GetInstance()
	if db != nil {
		t.Error("expected nil")
	}
}

View File

@@ -0,0 +1,930 @@
package engine
import (
"database/sql"
"errors"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/config"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database/repository/candle"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjob"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjobresult"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/kline"
"github.com/thrasher-corp/gocryptotrader/exchanges/trade"
"github.com/thrasher-corp/gocryptotrader/log"
)
// SetupDataHistoryManager creates a data history manager subsystem.
// It validates its dependencies and configuration, normalising non-positive
// interval/job-count settings to their defaults, then wires up the job and
// job result database services.
func SetupDataHistoryManager(em iExchangeManager, dcm iDatabaseConnectionManager, cfg *config.DataHistoryManager) (*DataHistoryManager, error) {
	if em == nil {
		return nil, errNilExchangeManager
	}
	if dcm == nil {
		return nil, errNilDatabaseConnectionManager
	}
	if cfg == nil {
		return nil, errNilConfig
	}
	if cfg.CheckInterval <= 0 {
		cfg.CheckInterval = defaultDataHistoryTicker
	}
	// previously only == 0 was normalised, letting a negative value through
	if cfg.MaxJobsPerCycle <= 0 {
		cfg.MaxJobsPerCycle = defaultDataHistoryMaxJobsPerCycle
	}
	db := dcm.GetInstance()
	if db == nil {
		// GetInstance returns nil when the connection manager is not started
		return nil, errNilDatabaseConnectionManager
	}
	dhj, err := datahistoryjob.Setup(db)
	if err != nil {
		return nil, err
	}
	dhjr, err := datahistoryjobresult.Setup(db)
	if err != nil {
		return nil, err
	}
	return &DataHistoryManager{
		exchangeManager:            em,
		databaseConnectionInstance: db,
		shutdown:                   make(chan struct{}),
		interval:                   time.NewTicker(cfg.CheckInterval),
		jobDB:                      dhj,
		jobResultDB:                dhjr,
		maxJobsPerCycle:            cfg.MaxJobsPerCycle,
		verbose:                    cfg.Verbose,
		tradeLoader:                trade.HasTradesInRanges,
		candleLoader:               kline.LoadFromDatabase,
	}, nil
}
// Start runs the subsystem
func (m *DataHistoryManager) Start() error {
	if m == nil {
		return ErrNilSubsystem
	}
	// CAS guards against double-start from concurrent callers
	if !atomic.CompareAndSwapInt32(&m.started, 0, 1) {
		return ErrSubSystemAlreadyStarted
	}
	// fresh shutdown channel so a stopped manager can be restarted
	m.shutdown = make(chan struct{})
	m.run()
	log.Debugf(log.DataHistory, "Data history manager %v", MsgSubSystemStarted)
	return nil
}
// IsRunning checks whether the subsystem is running; a nil receiver reports false.
func (m *DataHistoryManager) IsRunning() bool {
	return m != nil && atomic.LoadInt32(&m.started) == 1
}
// Stop stops the subsystem and signals the background goroutine to exit.
// Returns ErrSubSystemNotStarted if it is not running.
func (m *DataHistoryManager) Stop() error {
	switch {
	case m == nil:
		return ErrNilSubsystem
	case !atomic.CompareAndSwapInt32(&m.started, 1, 0):
		return ErrSubSystemNotStarted
	}
	close(m.shutdown)
	log.Debugf(log.DataHistory, "Data history manager %v", MsgSubSystemShutdown)
	return nil
}
// retrieveJobs will connect to the database and look for existing incomplete
// jobs, converting them to in-memory jobs. Jobs that fail validation are
// logged and skipped rather than aborting the whole load.
func (m *DataHistoryManager) retrieveJobs() ([]*DataHistoryJob, error) {
	switch {
	case m == nil:
		return nil, ErrNilSubsystem
	case atomic.LoadInt32(&m.started) == 0:
		return nil, ErrSubSystemNotStarted
	}
	dbJobs, err := m.jobDB.GetAllIncompleteJobsAndResults()
	if err != nil {
		return nil, err
	}
	var jobs []*DataHistoryJob
	for x := range dbJobs {
		j, convErr := m.convertDBModelToJob(&dbJobs[x])
		if convErr != nil {
			return nil, convErr
		}
		if validationErr := m.validateJob(j); validationErr != nil {
			log.Error(log.DataHistory, validationErr)
			continue
		}
		jobs = append(jobs, j)
	}
	return jobs, nil
}
// PrepareJobs will validate the config jobs, verify their status with the database
// and return all valid jobs to be processed.
// m.jobs will be overridden by this function.
// If jobs cannot be retrieved the subsystem shuts itself down.
func (m *DataHistoryManager) PrepareJobs() ([]*DataHistoryJob, error) {
	switch {
	case m == nil:
		return nil, ErrNilSubsystem
	case atomic.LoadInt32(&m.started) == 0:
		return nil, ErrSubSystemNotStarted
	}
	m.m.Lock()
	defer m.m.Unlock()
	jobs, err := m.retrieveJobs()
	if err != nil {
		// a database failure here is unrecoverable; stop the subsystem after
		// the error has been assembled for the caller
		defer func() {
			if stopErr := m.Stop(); stopErr != nil {
				log.Error(log.DataHistory, stopErr)
			}
		}()
		return nil, fmt.Errorf("error retrieving jobs, has everything been setup? Data history manager will shut down. %w", err)
	}
	if err = m.compareJobsToData(jobs...); err != nil {
		return nil, err
	}
	return jobs, nil
}
// compareJobsToData recalculates each job's candle date ranges and marks which
// intervals already have data saved, either from stored candles or trades.
// "No data found" responses from the loaders are not treated as errors.
func (m *DataHistoryManager) compareJobsToData(jobs ...*DataHistoryJob) error {
	switch {
	case m == nil:
		return ErrNilSubsystem
	case atomic.LoadInt32(&m.started) == 0:
		return ErrSubSystemNotStarted
	}
	for x := range jobs {
		j := jobs[x]
		var err error
		j.rangeHolder, err = kline.CalculateCandleDateRanges(j.StartDate, j.EndDate, j.Interval, uint32(j.RequestSizeLimit))
		if err != nil {
			return err
		}
		switch j.DataType {
		case dataHistoryCandleDataType:
			var candles kline.Item
			candles, err = m.candleLoader(j.Exchange, j.Pair, j.Asset, j.Interval, j.StartDate, j.EndDate)
			if err != nil && !errors.Is(err, candle.ErrNoCandleDataFound) {
				return fmt.Errorf("%s could not load candle data: %w", j.Nickname, err)
			}
			j.rangeHolder.SetHasDataFromCandles(candles.Candles)
		case dataHistoryTradeDataType:
			err = m.tradeLoader(j.Exchange, j.Asset.String(), j.Pair.Base.String(), j.Pair.Quote.String(), j.rangeHolder)
			if err != nil && !errors.Is(err, sql.ErrNoRows) {
				return fmt.Errorf("%s could not load trade data: %w", j.Nickname, err)
			}
		default:
			return fmt.Errorf("%s %w %s", j.Nickname, errUnknownDataType, j.DataType)
		}
	}
	return nil
}
// run spawns the long-lived processing goroutine: it primes m.jobs once, then
// on every ticker fire kicks off a job-processing pass while the database is
// connected, exiting when the shutdown channel closes.
func (m *DataHistoryManager) run() {
	go func() {
		validJobs, err := m.PrepareJobs()
		if err != nil {
			log.Error(log.DataHistory, err)
		}
		m.m.Lock()
		m.jobs = validJobs
		m.m.Unlock()
		for {
			select {
			case <-m.shutdown:
				return
			case <-m.interval.C:
				if !m.databaseConnectionInstance.IsConnected() {
					continue
				}
				// run asynchronously so a slow pass does not block the ticker
				go func() {
					if err := m.runJobs(); err != nil {
						log.Error(log.DataHistory, err)
					}
				}()
			}
		}
	}()
}
// runJobs re-prepares all jobs and processes up to maxJobsPerCycle of them
// (-1 means unlimited). The processing flag guards against overlapping passes;
// individual job failures are logged, not returned.
func (m *DataHistoryManager) runJobs() error {
	switch {
	case m == nil:
		return ErrNilSubsystem
	case atomic.LoadInt32(&m.started) == 0:
		return ErrSubSystemNotStarted
	case !atomic.CompareAndSwapInt32(&m.processing, 0, 1):
		return fmt.Errorf("runJobs %w", errAlreadyRunning)
	}
	defer atomic.StoreInt32(&m.processing, 0)
	validJobs, err := m.PrepareJobs()
	if err != nil {
		return err
	}
	m.m.Lock()
	defer m.m.Unlock()
	m.jobs = validJobs
	log.Infof(log.DataHistory, "processing data history jobs")
	for i := 0; i < len(m.jobs) && (m.maxJobsPerCycle == -1 || i < int(m.maxJobsPerCycle)); i++ {
		if runErr := m.runJob(m.jobs[i]); runErr != nil {
			log.Error(log.DataHistory, runErr)
		}
		if m.verbose {
			log.Debugf(log.DataHistory, "completed run of data history job %v", m.jobs[i].Nickname)
		}
	}
	log.Infof(log.DataHistory, "completed run of data history jobs")
	return nil
}
// runJob processes an active job, retrieves candle or trade data
// for a given date range and saves all results to the database.
// It only attempts ranges that have neither data nor exhausted retries,
// caps work per run at job.RunBatchLimit ranges, and finally recomputes
// the job's overall status before upserting job and results.
func (m *DataHistoryManager) runJob(job *DataHistoryJob) error {
	if m == nil {
		return ErrNilSubsystem
	}
	if atomic.LoadInt32(&m.started) == 0 {
		return ErrSubSystemNotStarted
	}
	// non-active jobs (failed/complete/removed) are skipped silently
	if job.Status != dataHistoryStatusActive {
		return nil
	}
	var intervalsProcessed int64
	if job.rangeHolder == nil || len(job.rangeHolder.Ranges) == 0 {
		return fmt.Errorf("%s %w invalid start/end range %s-%s",
			job.Nickname,
			errJobInvalid,
			job.StartDate.Format(common.SimpleTimeFormatWithTimezone),
			job.EndDate.Format(common.SimpleTimeFormatWithTimezone),
		)
	}
	exch := m.exchangeManager.GetExchangeByName(job.Exchange)
	if exch == nil {
		return fmt.Errorf("%s %w, cannot process job %s for %s %s",
			job.Exchange,
			errExchangeNotLoaded,
			job.Nickname,
			job.Asset,
			job.Pair)
	}
	if m.verbose {
		log.Debugf(log.DataHistory, "running data history job %v start: %s end: %s interval: %s datatype: %s",
			job.Nickname,
			job.StartDate,
			job.EndDate,
			job.Interval,
			job.DataType)
	}
ranges:
	for i := range job.rangeHolder.Ranges {
		// a range is complete only when every interval inside it has data
		isCompleted := true
		for j := range job.rangeHolder.Ranges[i].Intervals {
			if !job.rangeHolder.Ranges[i].Intervals[j].HasData {
				isCompleted = false
				break
			}
		}
		if isCompleted ||
			intervalsProcessed >= job.RunBatchLimit {
			continue
		}
		// inspect prior results for this range's start time to decide
		// whether to retry, skip, or give up on the range
		var failures int64
		hasDataInRange := false
		resultLookup := job.Results[job.rangeHolder.Ranges[i].Start.Time]
		for x := range resultLookup {
			switch resultLookup[x].Status {
			case dataHistoryIntervalMissingData:
				// already declared unobtainable; never retry this range
				continue ranges
			case dataHistoryStatusFailed:
				failures++
			case dataHistoryStatusComplete:
				// this can occur in the scenario where data is missing
				// however no errors were encountered when data is missing
				// eg an exchange only returns an empty slice
				// or the exchange is simply missing the data and does not have an error
				hasDataInRange = true
			}
		}
		if failures >= job.MaxRetryAttempts {
			// failure threshold reached, we should not attempt
			// to check this interval again
			for x := range resultLookup {
				resultLookup[x].Status = dataHistoryIntervalMissingData
			}
			job.Results[job.rangeHolder.Ranges[i].Start.Time] = resultLookup
			continue
		}
		if hasDataInRange {
			continue
		}
		if m.verbose {
			log.Debugf(log.DataHistory, "job %s processing range %v-%v", job.Nickname, job.rangeHolder.Ranges[i].Start, job.rangeHolder.Ranges[i].End)
		}
		intervalsProcessed++
		id, err := uuid.NewV4()
		if err != nil {
			return err
		}
		// optimistic result: assume complete, downgrade to failed below
		result := DataHistoryJobResult{
			ID:                id,
			JobID:             job.ID,
			IntervalStartDate: job.rangeHolder.Ranges[i].Start.Time,
			IntervalEndDate:   job.rangeHolder.Ranges[i].End.Time,
			Status:            dataHistoryStatusComplete,
			Date:              time.Now(),
		}
		// processing the job
		switch job.DataType {
		case dataHistoryCandleDataType:
			candles, err := exch.GetHistoricCandlesExtended(job.Pair, job.Asset, job.rangeHolder.Ranges[i].Start.Time, job.rangeHolder.Ranges[i].End.Time, job.Interval)
			if err != nil {
				result.Result += "could not get candles: " + err.Error() + ". "
				result.Status = dataHistoryStatusFailed
				break
			}
			job.rangeHolder.SetHasDataFromCandles(candles.Candles)
			// the fetch may succeed yet return gaps; record each gap as a failure
			for j := range job.rangeHolder.Ranges[i].Intervals {
				if !job.rangeHolder.Ranges[i].Intervals[j].HasData {
					result.Status = dataHistoryStatusFailed
					result.Result += fmt.Sprintf("missing data from %v - %v. ",
						job.rangeHolder.Ranges[i].Intervals[j].Start.Time.Format(common.SimpleTimeFormatWithTimezone),
						job.rangeHolder.Ranges[i].Intervals[j].End.Time.Format(common.SimpleTimeFormatWithTimezone))
				}
			}
			_, err = kline.StoreInDatabase(&candles, true)
			if err != nil {
				result.Result += "could not save results: " + err.Error() + ". "
				result.Status = dataHistoryStatusFailed
			}
		case dataHistoryTradeDataType:
			trades, err := exch.GetHistoricTrades(job.Pair, job.Asset, job.rangeHolder.Ranges[i].Start.Time, job.rangeHolder.Ranges[i].End.Time)
			if err != nil {
				result.Result += "could not get trades: " + err.Error() + ". "
				result.Status = dataHistoryStatusFailed
				break
			}
			// trades are converted to candles only to measure range coverage
			candles, err := trade.ConvertTradesToCandles(job.Interval, trades...)
			if err != nil {
				result.Result += "could not convert candles to trades: " + err.Error() + ". "
				result.Status = dataHistoryStatusFailed
				break
			}
			job.rangeHolder.SetHasDataFromCandles(candles.Candles)
			for j := range job.rangeHolder.Ranges[i].Intervals {
				if !job.rangeHolder.Ranges[i].Intervals[j].HasData {
					result.Status = dataHistoryStatusFailed
					result.Result += fmt.Sprintf("missing data from %v - %v. ",
						job.rangeHolder.Ranges[i].Intervals[j].Start.Time.Format(common.SimpleTimeFormatWithTimezone),
						job.rangeHolder.Ranges[i].Intervals[j].End.Time.Format(common.SimpleTimeFormatWithTimezone))
				}
			}
			err = trade.SaveTradesToDatabase(trades...)
			if err != nil {
				result.Result += "could not save results: " + err.Error() + ". "
				result.Status = dataHistoryStatusFailed
			}
		default:
			return errUnknownDataType
		}
		lookup := job.Results[result.IntervalStartDate]
		lookup = append(lookup, result)
		job.Results[result.IntervalStartDate] = lookup
	}
	// derive the job's overall status from all accumulated range results
	completed := true
	allResultsSuccessful := true
	allResultsFailed := true
completionCheck:
	for i := range job.rangeHolder.Ranges {
		result, ok := job.Results[job.rangeHolder.Ranges[i].Start.Time]
		if !ok {
			completed = false
		}
	results:
		for j := range result {
			switch result[j].Status {
			case dataHistoryIntervalMissingData:
				allResultsSuccessful = false
				break results
			case dataHistoryStatusComplete:
				allResultsFailed = false
				break results
			default:
				// a range still awaiting retry means the job is not done
				completed = false
				break completionCheck
			}
		}
	}
	if completed {
		switch {
		case allResultsSuccessful:
			job.Status = dataHistoryStatusComplete
		case allResultsFailed:
			job.Status = dataHistoryStatusFailed
		default:
			job.Status = dataHistoryIntervalMissingData
		}
		log.Infof(log.DataHistory, "job %s finished! Status: %s", job.Nickname, job.Status)
	}
	// persist the updated job and all results regardless of completion
	dbJob := m.convertJobToDBModel(job)
	err := m.jobDB.Upsert(dbJob)
	if err != nil {
		return fmt.Errorf("job %s failed to update database: %w", job.Nickname, err)
	}
	dbJobResults := m.convertJobResultToDBResult(job.Results)
	err = m.jobResultDB.Upsert(dbJobResults...)
	if err != nil {
		return fmt.Errorf("job %s failed to insert job results to database: %w", job.Nickname, err)
	}
	return nil
}
// UpsertJob allows for GRPC interaction to upsert a job to be processed.
// When insertOnly is true an existing job with the same nickname is rejected;
// otherwise set (non-zero) fields of the incoming job are merged over the
// matching in-memory job before persisting. Only active jobs may be updated.
func (m *DataHistoryManager) UpsertJob(job *DataHistoryJob, insertOnly bool) error {
	if m == nil {
		return ErrNilSubsystem
	}
	if !m.IsRunning() {
		return ErrSubSystemNotStarted
	}
	if job == nil {
		return errNilJob
	}
	if job.Nickname == "" {
		return fmt.Errorf("upsert job %w", errNicknameUnset)
	}
	// errJobNotFound is acceptable here: it simply means this is an insert
	j, err := m.GetByNickname(job.Nickname, false)
	if err != nil && !errors.Is(err, errJobNotFound) {
		return err
	}
	if insertOnly && j != nil ||
		(j != nil && j.Status != dataHistoryStatusActive) {
		return fmt.Errorf("upsert job %w nickname: %s - status: %s ", errNicknameInUse, j.Nickname, j.Status)
	}
	m.m.Lock()
	defer m.m.Unlock()
	err = m.validateJob(job)
	if err != nil {
		return err
	}
	toUpdate := false
	if !insertOnly {
		// merge: any zero-value field on the incoming job leaves the
		// existing in-memory job's value untouched
		for i := range m.jobs {
			if !strings.EqualFold(m.jobs[i].Nickname, job.Nickname) {
				continue
			}
			toUpdate = true
			// reuse the existing ID so the database row is updated, not duplicated
			job.ID = m.jobs[i].ID
			if job.Exchange != "" && m.jobs[i].Exchange != job.Exchange {
				m.jobs[i].Exchange = job.Exchange
			}
			if job.Asset != "" && m.jobs[i].Asset != job.Asset {
				m.jobs[i].Asset = job.Asset
			}
			if !job.Pair.IsEmpty() && !m.jobs[i].Pair.Equal(job.Pair) {
				m.jobs[i].Pair = job.Pair
			}
			if !job.StartDate.IsZero() && !m.jobs[i].StartDate.Equal(job.StartDate) {
				m.jobs[i].StartDate = job.StartDate
			}
			if !job.EndDate.IsZero() && !m.jobs[i].EndDate.Equal(job.EndDate) {
				m.jobs[i].EndDate = job.EndDate
			}
			if job.Interval != 0 && m.jobs[i].Interval != job.Interval {
				m.jobs[i].Interval = job.Interval
			}
			if job.RunBatchLimit != 0 && m.jobs[i].RunBatchLimit != job.RunBatchLimit {
				m.jobs[i].RunBatchLimit = job.RunBatchLimit
			}
			if job.RequestSizeLimit != 0 && m.jobs[i].RequestSizeLimit != job.RequestSizeLimit {
				m.jobs[i].RequestSizeLimit = job.RequestSizeLimit
			}
			if job.MaxRetryAttempts != 0 && m.jobs[i].MaxRetryAttempts != job.MaxRetryAttempts {
				m.jobs[i].MaxRetryAttempts = job.MaxRetryAttempts
			}
			// DataType and Status are always overwritten, even with zero values
			m.jobs[i].DataType = job.DataType
			m.jobs[i].Status = job.Status
			break
		}
	}
	if job.ID == uuid.Nil {
		job.ID, err = uuid.NewV4()
		if err != nil {
			return err
		}
	}
	job.rangeHolder, err = kline.CalculateCandleDateRanges(job.StartDate, job.EndDate, job.Interval, uint32(job.RequestSizeLimit))
	if err != nil {
		return err
	}
	if !toUpdate {
		m.jobs = append(m.jobs, job)
	}
	dbJob := m.convertJobToDBModel(job)
	return m.jobDB.Upsert(dbJob)
}
// validateJob verifies a job's exchange, asset, pair, status, data type and
// interval against what the loaded exchange supports. Note: it also mutates
// the job — unset limits receive defaults, trade jobs outside the allowed
// interval window are clamped, and start/end dates are rounded to the interval.
func (m *DataHistoryManager) validateJob(job *DataHistoryJob) error {
	if job == nil {
		return errNilJob
	}
	if !job.Asset.IsValid() {
		return fmt.Errorf("job %s %w %s", job.Nickname, asset.ErrNotSupported, job.Asset)
	}
	if job.Pair.IsEmpty() {
		return fmt.Errorf("job %s %w", job.Nickname, errCurrencyPairUnset)
	}
	if !job.Status.Valid() {
		return fmt.Errorf("job %s %w: %s", job.Nickname, errInvalidDataHistoryStatus, job.Status)
	}
	if !job.DataType.Valid() {
		return fmt.Errorf("job %s %w: %s", job.Nickname, errInvalidDataHistoryDataType, job.DataType)
	}
	exch := m.exchangeManager.GetExchangeByName(job.Exchange)
	if exch == nil {
		return fmt.Errorf("job %s cannot process job: %s %w",
			job.Nickname,
			job.Exchange,
			errExchangeNotLoaded)
	}
	pairs, err := exch.GetEnabledPairs(job.Asset)
	if err != nil {
		return fmt.Errorf("job %s exchange %s asset %s currency %s %w", job.Nickname, job.Exchange, job.Asset, job.Pair, err)
	}
	if !pairs.Contains(job.Pair, false) {
		return fmt.Errorf("job %s exchange %s asset %s currency %s %w", job.Nickname, job.Exchange, job.Asset, job.Pair, errCurrencyNotEnabled)
	}
	if job.Results == nil {
		job.Results = make(map[time.Time][]DataHistoryJobResult)
	}
	if job.RunBatchLimit <= 0 {
		log.Warnf(log.DataHistory, "job %s has unset batch limit, defaulting to %v", job.Nickname, defaultDataHistoryBatchLimit)
		job.RunBatchLimit = defaultDataHistoryBatchLimit
	}
	if job.MaxRetryAttempts <= 0 {
		log.Warnf(log.DataHistory, "job %s has unset max retry limit, defaulting to %v", job.Nickname, defaultDataHistoryRetryAttempts)
		job.MaxRetryAttempts = defaultDataHistoryRetryAttempts
	}
	if job.RequestSizeLimit <= 0 {
		job.RequestSizeLimit = defaultDataHistoryRequestSizeLimit
	}
	// trade jobs are limited to intervals between 10 minutes and 4 hours
	// (exclusive) to keep request windows manageable
	if job.DataType == dataHistoryTradeDataType &&
		(job.Interval >= kline.FourHour || job.Interval <= kline.TenMin) {
		log.Warnf(log.DataHistory, "job %s interval %v outside limits, defaulting to %v", job.Nickname, job.Interval.Word(), defaultDataHistoryTradeInterval)
		job.Interval = defaultDataHistoryTradeInterval
	}
	b := exch.GetBase()
	if !b.Features.Enabled.Kline.Intervals[job.Interval.Word()] {
		return fmt.Errorf("job %s %s %w %s", job.Nickname, job.Interval.Word(), kline.ErrUnsupportedInterval, job.Exchange)
	}
	// align dates to interval boundaries before validating the range
	job.StartDate = job.StartDate.Round(job.Interval.Duration())
	job.EndDate = job.EndDate.Round(job.Interval.Duration())
	if err := common.StartEndTimeCheck(job.StartDate, job.EndDate); err != nil {
		return fmt.Errorf("job %s %w start: %v end %v", job.Nickname, err, job.StartDate, job.EndDate)
	}
	return nil
}
// GetByID returns a copy of a job's details from its ID, checking the
// in-memory cache first and falling back to the database.
func (m *DataHistoryManager) GetByID(id uuid.UUID) (*DataHistoryJob, error) {
	switch {
	case m == nil:
		return nil, ErrNilSubsystem
	case atomic.LoadInt32(&m.started) == 0:
		return nil, ErrSubSystemNotStarted
	case id == uuid.Nil:
		return nil, errEmptyID
	}
	m.m.Lock()
	for x := range m.jobs {
		if m.jobs[x].ID != id {
			continue
		}
		cpy := *m.jobs[x]
		m.m.Unlock()
		return &cpy, nil
	}
	m.m.Unlock()
	dbJ, err := m.jobDB.GetByID(id.String())
	if err != nil {
		return nil, fmt.Errorf("%w with id %s %s", errJobNotFound, id, err)
	}
	job, err := m.convertDBModelToJob(dbJ)
	if err != nil {
		return nil, fmt.Errorf("could not convert model with id %s %w", id, err)
	}
	return job, nil
}
// GetByNickname searches for jobs by name and returns a copy if found,
// errJobNotFound if not.
// If fullDetails is enabled, it will retrieve all job history results from the database.
func (m *DataHistoryManager) GetByNickname(nickname string, fullDetails bool) (*DataHistoryJob, error) {
	if m == nil {
		return nil, ErrNilSubsystem
	}
	if atomic.LoadInt32(&m.started) == 0 {
		return nil, ErrSubSystemNotStarted
	}
	if fullDetails {
		dbJ, err := m.jobDB.GetJobAndAllResults(nickname)
		if err != nil {
			return nil, fmt.Errorf("job %s could not load job from database: %w", nickname, err)
		}
		result, err := m.convertDBModelToJob(dbJ)
		if err != nil {
			return nil, fmt.Errorf("could not convert model with nickname %s %w", nickname, err)
		}
		return result, nil
	}
	m.m.Lock()
	for i := range m.jobs {
		if strings.EqualFold(m.jobs[i].Nickname, nickname) {
			// return a copy rather than the stored pointer, consistent with
			// GetByID, so callers cannot mutate manager-owned state outside
			// the lock
			cpy := *m.jobs[i]
			m.m.Unlock()
			return &cpy, nil
		}
	}
	m.m.Unlock()
	// now try the database
	j, err := m.jobDB.GetByNickName(nickname)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// no need to display normal sql err to user
			return nil, errJobNotFound
		}
		return nil, fmt.Errorf("job %s %w, %s", nickname, errJobNotFound, err)
	}
	job, err := m.convertDBModelToJob(j)
	if err != nil {
		return nil, err
	}
	return job, nil
}
// GetAllJobStatusBetween will return all jobs of any status created between
// the two supplied dates
func (m *DataHistoryManager) GetAllJobStatusBetween(start, end time.Time) ([]*DataHistoryJob, error) {
	switch {
	case m == nil:
		return nil, ErrNilSubsystem
	case atomic.LoadInt32(&m.started) == 0:
		return nil, ErrSubSystemNotStarted
	}
	if err := common.StartEndTimeCheck(start, end); err != nil {
		return nil, err
	}
	dbJobs, err := m.jobDB.GetJobsBetween(start, end)
	if err != nil {
		return nil, err
	}
	var jobs []*DataHistoryJob
	for x := range dbJobs {
		j, convErr := m.convertDBModelToJob(&dbJobs[x])
		if convErr != nil {
			return nil, convErr
		}
		jobs = append(jobs, j)
	}
	return jobs, nil
}
// DeleteJob helper function to assist in setting a job to deleted.
// Exactly one of nickname or id must be supplied. Only active jobs may be
// removed; the job is flagged removed in the database, and only dropped from
// the in-memory list after validation and persistence succeed, so a failed
// delete leaves the manager's state untouched.
func (m *DataHistoryManager) DeleteJob(nickname, id string) error {
	if m == nil {
		return ErrNilSubsystem
	}
	if atomic.LoadInt32(&m.started) == 0 {
		return ErrSubSystemNotStarted
	}
	if nickname == "" && id == "" {
		return errNicknameIDUnset
	}
	if nickname != "" && id != "" {
		return errOnlyNicknameOrID
	}
	var dbJob *datahistoryjob.DataHistoryJob
	var err error
	m.m.Lock()
	defer m.m.Unlock()
	// locate the job in memory, but defer removal until after the status
	// check and database upsert succeed
	jobIdx := -1
	for i := range m.jobs {
		if strings.EqualFold(m.jobs[i].Nickname, nickname) ||
			m.jobs[i].ID.String() == id {
			dbJob = m.convertJobToDBModel(m.jobs[i])
			jobIdx = i
			break
		}
	}
	if dbJob == nil {
		if nickname != "" {
			dbJob, err = m.jobDB.GetByNickName(nickname)
		} else {
			dbJob, err = m.jobDB.GetByID(id)
		}
		if err != nil {
			return err
		}
	}
	if dbJob.Status != int64(dataHistoryStatusActive) {
		status := dataHistoryStatus(dbJob.Status)
		return fmt.Errorf("job: %v status: %s error: %w", dbJob.Nickname, status, errCanOnlyDeleteActiveJobs)
	}
	dbJob.Status = int64(dataHistoryStatusRemoved)
	err = m.jobDB.Upsert(dbJob)
	if err != nil {
		return err
	}
	if jobIdx > -1 {
		m.jobs = append(m.jobs[:jobIdx], m.jobs[jobIdx+1:]...)
	}
	log.Infof(log.DataHistory, "deleted job %v", dbJob.Nickname)
	return nil
}
// GetActiveJobs returns copies of all in-memory jobs with the status
// `dataHistoryStatusActive`
func (m *DataHistoryManager) GetActiveJobs() ([]DataHistoryJob, error) {
	if m == nil {
		return nil, ErrNilSubsystem
	}
	if !m.IsRunning() {
		return nil, ErrSubSystemNotStarted
	}
	m.m.Lock()
	defer m.m.Unlock()
	var active []DataHistoryJob
	for x := range m.jobs {
		if m.jobs[x].Status != dataHistoryStatusActive {
			continue
		}
		active = append(active, *m.jobs[x])
	}
	return active, nil
}
// GenerateJobSummary returns a human readable summary of a job's status,
// refreshing the job's data-range coverage before reporting
func (m *DataHistoryManager) GenerateJobSummary(nickname string) (*DataHistoryJobSummary, error) {
	if m == nil {
		return nil, ErrNilSubsystem
	}
	job, err := m.GetByNickname(nickname, false)
	if err != nil {
		return nil, fmt.Errorf("job: %v %w", nickname, err)
	}
	if err = m.compareJobsToData(job); err != nil {
		return nil, err
	}
	summary := DataHistoryJobSummary{
		Nickname:     job.Nickname,
		Exchange:     job.Exchange,
		Asset:        job.Asset,
		Pair:         job.Pair,
		StartDate:    job.StartDate,
		EndDate:      job.EndDate,
		Interval:     job.Interval,
		Status:       job.Status,
		DataType:     job.DataType,
		ResultRanges: job.rangeHolder.DataSummary(true),
	}
	return &summary, nil
}
// ----------------------------Lovely-converters----------------------------

// convertDBModelToJob translates a database job row (and its results) into
// the manager's in-memory job representation
func (m *DataHistoryManager) convertDBModelToJob(dbModel *datahistoryjob.DataHistoryJob) (*DataHistoryJob, error) {
	jobID, err := uuid.FromString(dbModel.ID)
	if err != nil {
		return nil, err
	}
	pair, err := currency.NewPairFromString(fmt.Sprintf("%s-%s", dbModel.Base, dbModel.Quote))
	if err != nil {
		return nil, fmt.Errorf("job %s could not format pair %s-%s: %w", dbModel.Nickname, dbModel.Base, dbModel.Quote, err)
	}
	results, err := m.convertDBResultToJobResult(dbModel.Results)
	if err != nil {
		return nil, fmt.Errorf("job %s could not convert database job: %w", dbModel.Nickname, err)
	}
	job := &DataHistoryJob{
		ID:               jobID,
		Nickname:         dbModel.Nickname,
		Exchange:         dbModel.ExchangeName,
		Asset:            asset.Item(dbModel.Asset),
		Pair:             pair,
		StartDate:        dbModel.StartDate,
		EndDate:          dbModel.EndDate,
		Interval:         kline.Interval(dbModel.Interval),
		RunBatchLimit:    dbModel.BatchSize,
		RequestSizeLimit: dbModel.RequestSizeLimit,
		DataType:         dataHistoryDataType(dbModel.DataType),
		MaxRetryAttempts: dbModel.MaxRetryAttempts,
		Status:           dataHistoryStatus(dbModel.Status),
		CreatedDate:      dbModel.CreatedDate,
		Results:          results,
	}
	return job, nil
}
// convertDBResultToJobResult translates database job-result rows into the
// in-memory map keyed by each result's interval start time
func (m *DataHistoryManager) convertDBResultToJobResult(dbModels []*datahistoryjobresult.DataHistoryJobResult) (map[time.Time][]DataHistoryJobResult, error) {
	converted := make(map[time.Time][]DataHistoryJobResult)
	for x := range dbModels {
		resultID, err := uuid.FromString(dbModels[x].ID)
		if err != nil {
			return nil, err
		}
		parentJobID, err := uuid.FromString(dbModels[x].JobID)
		if err != nil {
			return nil, err
		}
		start := dbModels[x].IntervalStartDate
		converted[start] = append(converted[start], DataHistoryJobResult{
			ID:                resultID,
			JobID:             parentJobID,
			IntervalStartDate: start,
			IntervalEndDate:   dbModels[x].IntervalEndDate,
			Status:            dataHistoryStatus(dbModels[x].Status),
			Result:            dbModels[x].Result,
			Date:              dbModels[x].Date,
		})
	}
	return converted, nil
}
// convertJobResultToDBResult flattens the in-memory result map into a slice
// of database rows ready for upserting
func (m *DataHistoryManager) convertJobResultToDBResult(results map[time.Time][]DataHistoryJobResult) []*datahistoryjobresult.DataHistoryJobResult {
	var dbResults []*datahistoryjobresult.DataHistoryJobResult
	for _, intervalResults := range results {
		for x := range intervalResults {
			r := &intervalResults[x]
			dbResults = append(dbResults, &datahistoryjobresult.DataHistoryJobResult{
				ID:                r.ID.String(),
				JobID:             r.JobID.String(),
				IntervalStartDate: r.IntervalStartDate,
				IntervalEndDate:   r.IntervalEndDate,
				Status:            int64(r.Status),
				Result:            r.Result,
				Date:              r.Date,
			})
		}
	}
	return dbResults
}
// convertJobToDBModel translates an in-memory job (and its results) into a
// database row; a nil UUID is stored as an empty string
func (m *DataHistoryManager) convertJobToDBModel(job *DataHistoryJob) *datahistoryjob.DataHistoryJob {
	var jobID string
	if job.ID != uuid.Nil {
		jobID = job.ID.String()
	}
	return &datahistoryjob.DataHistoryJob{
		ID:               jobID,
		Nickname:         job.Nickname,
		ExchangeName:     job.Exchange,
		Asset:            job.Asset.String(),
		Base:             job.Pair.Base.String(),
		Quote:            job.Pair.Quote.String(),
		StartDate:        job.StartDate,
		EndDate:          job.EndDate,
		Interval:         int64(job.Interval.Duration()),
		RequestSizeLimit: job.RequestSizeLimit,
		DataType:         int64(job.DataType),
		MaxRetryAttempts: job.MaxRetryAttempts,
		Status:           int64(job.Status),
		CreatedDate:      job.CreatedDate,
		BatchSize:        job.RunBatchLimit,
		Results:          m.convertJobResultToDBResult(job.Results),
	}
}

View File

@@ -0,0 +1,144 @@
# GoCryptoTrader package Datahistory manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
[![Build Status](https://github.com/thrasher-corp/gocryptotrader/actions/workflows/tests.yml/badge.svg?branch=master)](https://github.com/thrasher-corp/gocryptotrader/actions/workflows/tests.yml)
[![Software License](https://img.shields.io/badge/License-MIT-orange.svg?style=flat-square)](https://github.com/thrasher-corp/gocryptotrader/blob/master/LICENSE)
[![GoDoc](https://godoc.org/github.com/thrasher-corp/gocryptotrader?status.svg)](https://godoc.org/github.com/thrasher-corp/gocryptotrader/engine/datahistory_manager)
[![Coverage Status](http://codecov.io/github/thrasher-corp/gocryptotrader/coverage.svg?branch=master)](http://codecov.io/github/thrasher-corp/gocryptotrader?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/thrasher-corp/gocryptotrader)](https://goreportcard.com/report/github.com/thrasher-corp/gocryptotrader)
This datahistory_manager package is part of the GoCryptoTrader codebase.
## This is still in active development
You can track ideas, planned features and what's in progress on this Trello board: [https://trello.com/b/ZAhMhpOy/gocryptotrader](https://trello.com/b/ZAhMhpOy/gocryptotrader).
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Datahistory manager
+ The data history manager is an engine subsystem responsible for ensuring that the candle/trade history in the range you define is synchronised to your database
+ It is a long running synchronisation task designed to not overwhelm resources and ensure that all data requested is accounted for and saved to the database
+ The data history manager is disabled by default and requires a database connection to function
+ It can be enabled either via a runtime param, config modification or via RPC command `enablesubsystem`
+ The data history manager accepts jobs from RPC commands
+ A job is defined in the `Database tables` section below
+ Jobs will be addressed by the data history manager at an interval defined in your config, this is detailed below in the `Application run time parameters` table below
+ Jobs will fetch data at sizes you request (which can cater to hardware limitations such as low RAM)
+ Jobs are completed once all data has been fetched/attempted to be fetched in the time range
## What are the prerequisites?
+ Ensure you have a database setup, you can read about that [here](/database)
+ Ensure you have run dbmigrate under `/cmd/dbmigrate` via `dbmigrate -command=up`, you can read about that [here](/database#create-and-run-migrations)
+ Ensure you have seeded exchanges to the database via the application dbseed under `/cmd/dbseed`, you can read about it [here](/cmd/dbseed)
+ Ensure you have the database setup and enabled in your config, this can also be seen [here](/database)
+ Data retrieval can only be made on exchanges that support it, see the readmes for [candles](/docs/OHLCV.md) and [trades](/exchanges/trade#exchange-support-table)
+ Read below on how to enable the data history manager and add data history jobs
## What is a data history job?
A job is a set of parameters which will allow GoCryptoTrader to periodically retrieve historical data. Its purpose is to break up the process of retrieving large sets of data for multiple currencies and exchanges into more manageable chunks in a "set and forget" style.
For a breakdown of what a job consists of and what each parameter does, please review the database tables and the cycle details below.
## What happens during a data history cycle?
+ Once the checkInterval ticker timer has finished, the data history manager will process all jobs considered `active`.
+ A job's start and end time is broken down into intervals defined by the `interval` variable of a job. A job beginning `2020-01-01` and ending `2020-01-02` with an interval of one hour will create 24 chunks to retrieve
+ The number of intervals it will then request from an API is defined by the `RequestSizeLimit`. A `RequestSizeLimit` of 2 will mean when processing a job, the data history manager will fetch 2 hours worth of data
+ When processing a job the `RunBatchLimit` defines how many `RequestSizeLimits` it will fetch. A `RunBatchLimit` of 3 means when processing a job, the history manager will fetch 3 lots of 2 hour chunks from the API in a run of a job
+ If the data is successfully retrieved, that chunk will be considered `complete` and saved to the database
+ The `MaxRetryAttempts` defines how many times the data history manager will attempt to fetch a chunk of data before flagging it as `failed`.
+ A chunk is only attempted once per processing time.
+ If it fails, the next attempt will be after the `checkInterval` has finished again.
+ The errors for retrieval failures are stored in the database, allowing you to understand why a certain chunk of time is unavailable (eg exchange downtime and missing data)
+ All results are saved to the database, the data history manager will analyse all results and ready jobs for the next round of processing
## How do I add one?
+ First ensure that the data history manager is enabled, you can do this via the config (see table `dataHistoryManager` under Config parameters below), via run time parameter (see table Application run time parameters below) or via the RPC command `enablesubsystem --subsystemname="data_history_manager"`
+ The simplest way of adding a new data history job is via the GCTCLI under `/cmd/gctcli`.
+ Modify the following example command to your needs: `.\gctcli.exe datahistory upsertjob --nickname=binance-spot-bnb-btc-1h-candles --exchange=binance --asset=spot --pair=BNB-BTC --interval=3600 --start_date="2020-06-02 12:00:00" --end_date="2020-12-02 12:00:00" --request_size_limit=10 --data_type=0 --max_retry_attempts=3 --batch_size=3`
### Candle intervals and trade fetching
+ A candle interval is required for a job, even when fetching trade data. This is to appropriately break down requests into time interval chunks. However, it is restricted to only a small range of times. This is to prevent fetching issues as fetching trades over a period of days or weeks will take a significant amount of time. When setting a job to fetch trades, the allowable range is less than 4 hours and greater than 10 minutes.
### Application run time parameters
| Parameter | Description | Example |
| ------ | ----------- | ------- |
| datahistorymanager | A boolean value which determines if the data history manager is enabled. Defaults to `false` | `-datahistorymanager=true` |
### Config parameters
#### dataHistoryManager
| Config | Description | Example |
| ------ | ----------- | ------- |
| enabled | If enabled will run the data history manager on startup | `true` |
| checkInterval | A golang `time.Duration` interval of when to attempt to fetch all active jobs' data | `15000000000` |
| maxJobsPerCycle | Allows you to control how many jobs are processed after the `checkInterval` timer finishes. Useful if you have many jobs, but don't wish to constantly be retrieving data | `5` |
| verbose | Displays some extra logs to your logging output to help debug | `false` |
### RPC commands
The below table is a summary of commands. For more details, view the commands in `/cmd/gctcli` or `/gctrpc/rpc.swagger.json`
| Command | Description |
| ------ | ----------- |
| UpsertDataHistoryJob | Updates or Inserts a job to the manager and database |
| GetDataHistoryJobDetails | Returns a job's details via its nickname or ID. Can optionally return an array of all run results |
| GetActiveDataHistoryJobs | Will return all jobs that have an `active` status |
| DeleteJob | Will remove a job for processing. Data is preserved in the database for later reference |
| GetDataHistoryJobsBetween | Returns all jobs, of all status types between the dates provided |
| GetDataHistoryJobSummary | Will return an executive summary of the progress of your job by nickname |
### Database tables
#### datahistoryjob
| Field | Description | Example |
| ------ | ----------- | ------- |
| id | Unique ID of the job. Generated at creation | `deadbeef-dead-beef-dead-beef13371337` |
| nickname | A custom name for the job that is unique for lookups | `binance-xrp-doge-2017` |
| exchange_name_id | The exchange id to fetch data from. The ID should be generated via `/cmd/dbmigrate`. When creating a job, you only need to provide the exchange name | `binance` |
| asset | The asset type of the data to be fetching | `spot` |
| base | The currency pair base of the data to be fetching | `xrp` |
| quote | The currency pair quote of the data to be fetching | `doge` |
| start_time | When to begin fetching data | `01-01-2017T13:33:37Z` |
| end_time | When to finish fetching data | `01-01-2018T13:33:37Z` |
| interval | A golang `time.Duration` representation of the candle interval to use. | `30000000000` |
| data_type | The data type to fetch. `0` is candles and `1` is trades | `0` |
| request_size | The number of candles to fetch. eg if `500`, the data history manager will break up the request into the appropriate timeframe to ensure the data history run interval will fetch 500 candles to save to the database | `500` |
| max_retries | For an interval period, the amount of attempts the data history manager is allowed to attempt to fetch data before moving onto the next period. This can be useful for determining whether the exchange is missing the data in that time period or, if just one failure of three, just means that the data history manager couldn't finish one request | `3` |
| batch_count | The number of requests to make when processing a job | `3` |
| status | A numerical representation for the status. `0` is active, `1` is failed `2` is complete, `3` is removed and `4` is missing data | `0` |
| created | The date the job was created. | `2020-01-01T13:33:37Z` |
#### datahistoryjobresult
| Field | Description | Example |
| ------ | ----------- | ------- |
| id | Unique ID of the job status | `deadbeef-dead-beef-dead-beef13371337` |
| job_id | The job ID being referenced | `deadbeef-dead-beef-dead-beef13371337` |
| result | If there is an error, it will be detailed here | `exchange missing candle data for 2020-01-01 13:37Z` |
| status | A numerical representation of the job result status. `1` is failed, `2` is complete and `4` is missing data | `2` |
| interval_start_time | The start date of the period fetched | `2020-01-01T13:33:37Z` |
| interval_end_time | The end date of the period fetched | `2020-01-02T13:33:37Z` |
| run_time | The time the job was run | `2020-01-03T13:33:37Z` |
### Please click GoDocs chevron above to view current GoDoc information for this package
## Contribution
Please feel free to submit any pull requests or suggest any desired features to be added.
When submitting a PR, please abide by our coding guidelines:
+ Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
+ Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
+ Code must adhere to our [coding style](https://github.com/thrasher-corp/gocryptotrader/blob/master/doc/coding_style.md).
+ Pull requests need to be based on and opened against the `master` branch.
## Donations
<img src="https://github.com/thrasher-corp/gocryptotrader/blob/master/web/src/assets/donate.png?raw=true" hspace="70">
If this framework helped you in any way, or you would like to support the developers working on it, please donate Bitcoin to:
***bc1qk0jareu4jytc0cfrhr5wgshsq8282awpavfahc***

View File

@@ -0,0 +1,948 @@
package engine
import (
"database/sql"
"errors"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/common/convert"
"github.com/thrasher-corp/gocryptotrader/config"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjob"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjobresult"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/kline"
)
// TestSetupDataHistoryManager exercises every constructor argument validation
// path before asserting a manager is returned when given a stubbed, connected
// database connection manager.
func TestSetupDataHistoryManager(t *testing.T) {
	t.Parallel()
	_, err := SetupDataHistoryManager(nil, nil, nil)
	if !errors.Is(err, errNilExchangeManager) {
		// previously reported errNilConfig as the expected error
		t.Errorf("error '%v', expected '%v'", err, errNilExchangeManager)
	}
	_, err = SetupDataHistoryManager(SetupExchangeManager(), nil, nil)
	if !errors.Is(err, errNilDatabaseConnectionManager) {
		t.Errorf("error '%v', expected '%v'", err, errNilDatabaseConnectionManager)
	}
	_, err = SetupDataHistoryManager(SetupExchangeManager(), &DatabaseConnectionManager{}, nil)
	if !errors.Is(err, errNilConfig) {
		t.Errorf("error '%v', expected '%v'", err, errNilConfig)
	}
	_, err = SetupDataHistoryManager(SetupExchangeManager(), &DatabaseConnectionManager{}, &config.DataHistoryManager{})
	if !errors.Is(err, database.ErrNilInstance) {
		t.Errorf("error '%v', expected '%v'", err, database.ErrNilInstance)
	}
	// build a minimally-viable connected database instance for the happy path
	dbInst := &database.Instance{}
	err = dbInst.SetConfig(&database.Config{Enabled: true})
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	dbInst.SetConnected(true)
	dbCM := &DatabaseConnectionManager{
		dbConn:  dbInst,
		started: 1,
	}
	err = dbInst.SetSQLiteConnection(&sql.DB{})
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	m, err := SetupDataHistoryManager(SetupExchangeManager(), dbCM, &config.DataHistoryManager{})
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if m == nil {
		t.Fatal("expected manager")
	}
}
// TestDataHistoryManagerIsRunning verifies IsRunning tracks the started flag
// and is safe to invoke on a nil manager.
func TestDataHistoryManagerIsRunning(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	dhm.started = 0
	if dhm.IsRunning() {
		t.Error("expected false")
	}
	dhm.started = 1
	if !dhm.IsRunning() {
		t.Error("expected true")
	}
	// nil receiver must not panic and must report not running
	dhm = nil
	if dhm.IsRunning() {
		t.Error("expected false")
	}
}
// TestDataHistoryManagerStart covers a clean start, a double start, and
// starting a nil manager.
func TestDataHistoryManagerStart(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	dhm.started = 0
	if err := dhm.Start(); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if err := dhm.Start(); !errors.Is(err, ErrSubSystemAlreadyStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemAlreadyStarted)
	}
	dhm = nil
	if err := dhm.Start(); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestDataHistoryManagerStop covers a clean stop, a double stop, and stopping
// a nil manager.
func TestDataHistoryManagerStop(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	dhm.shutdown = make(chan struct{})
	if err := dhm.Stop(); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if err := dhm.Stop(); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if err := dhm.Stop(); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestUpsertJob steps through every validation failure in UpsertJob, inserts a
// valid job, then updates it via a second upsert and asserts the stored job
// reflects the updated start date.
func TestUpsertJob(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	err := m.UpsertJob(nil, false)
	if !errors.Is(err, errNilJob) {
		t.Errorf("error '%v', expected '%v'", err, errNilJob)
	}
	dhj := &DataHistoryJob{}
	err = m.UpsertJob(dhj, false)
	if !errors.Is(err, errNicknameUnset) {
		t.Errorf("error '%v', expected '%v'", err, errNicknameUnset)
	}
	dhj.Nickname = "test1337"
	err = m.UpsertJob(dhj, false)
	if !errors.Is(err, asset.ErrNotSupported) {
		t.Errorf("error '%v', expected '%v'", err, asset.ErrNotSupported)
	}
	dhj.Asset = asset.Spot
	err = m.UpsertJob(dhj, false)
	if !errors.Is(err, errCurrencyPairUnset) {
		t.Errorf("error '%v', expected '%v'", err, errCurrencyPairUnset)
	}
	// BTC-USDT is not enabled on the stub exchange; only BTC-USD is
	dhj.Exchange = strings.ToLower(testExchange)
	dhj.Pair = currency.NewPair(currency.BTC, currency.USDT)
	err = m.UpsertJob(dhj, false)
	if !errors.Is(err, errCurrencyNotEnabled) {
		t.Errorf("error '%v', expected '%v'", err, errCurrencyNotEnabled)
	}
	dhj.Pair = currency.NewPair(currency.BTC, currency.USD)
	err = m.UpsertJob(dhj, false)
	if !errors.Is(err, kline.ErrUnsupportedInterval) {
		t.Errorf("error '%v', expected '%v'", err, kline.ErrUnsupportedInterval)
	}
	dhj.Interval = kline.OneHour
	err = m.UpsertJob(dhj, false)
	if !errors.Is(err, common.ErrDateUnset) {
		t.Errorf("error '%v', expected '%v'", err, common.ErrDateUnset)
	}
	dhj.StartDate = time.Now().Add(-time.Hour)
	dhj.EndDate = time.Now()
	err = m.UpsertJob(dhj, false)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(m.jobs) != 1 {
		t.Errorf("expected 1 job, received %v", len(m.jobs))
	}
	// insertOnly must refuse a nickname that already exists
	err = m.UpsertJob(dhj, true)
	if !errors.Is(err, errNicknameInUse) {
		t.Errorf("error '%v', expected '%v'", err, errNicknameInUse)
	}
	newJob := &DataHistoryJob{
		Nickname:         dhj.Nickname,
		Exchange:         testExchange,
		Asset:            asset.Spot,
		Pair:             currency.NewPair(currency.BTC, currency.USD),
		StartDate:        startDate,
		EndDate:          time.Now().Add(-time.Minute),
		Interval:         kline.FifteenMin,
		RunBatchLimit:    1338,
		RequestSizeLimit: 1337,
		DataType:         2, // deliberately outside the valid data type range
		MaxRetryAttempts: 1337,
	}
	err = m.UpsertJob(newJob, false)
	if !errors.Is(err, errInvalidDataHistoryDataType) {
		t.Errorf("error '%v', expected '%v'", err, errInvalidDataHistoryDataType)
	}
	newJob.DataType = dataHistoryTradeDataType
	err = m.UpsertJob(newJob, false)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if !m.jobs[0].StartDate.Equal(startDate) {
		// previously logged a nil err; report the actual mismatch instead
		t.Errorf("expected start date %v, received %v", startDate, m.jobs[0].StartDate)
	}
}
// TestDeleteJob covers deleting by nickname and by ID, argument validation,
// and stopped/nil manager behaviour.
func TestDeleteJob(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	job := &DataHistoryJob{
		Nickname:  "TestDeleteJob",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	if err := dhm.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if err := dhm.DeleteJob("", ""); !errors.Is(err, errNicknameIDUnset) {
		t.Errorf("error '%v', expected '%v'", err, errNicknameIDUnset)
	}
	if err := dhm.DeleteJob("1337", "1337"); !errors.Is(err, errOnlyNicknameOrID) {
		t.Errorf("error '%v', expected '%v'", err, errOnlyNicknameOrID)
	}
	if err := dhm.DeleteJob(job.Nickname, ""); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(dhm.jobs) != 0 {
		t.Error("expected 0")
	}
	if err := dhm.DeleteJob("", job.ID.String()); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	atomic.StoreInt32(&dhm.started, 0)
	if err := dhm.DeleteJob("", job.ID.String()); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if err := dhm.DeleteJob("", job.ID.String()); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestGetByNickname retrieves a job by nickname from memory, then with the
// full-details flag, then from the stub database after clearing memory, and
// finally checks stopped/nil manager behaviour.
func TestGetByNickname(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	job := &DataHistoryJob{
		Nickname:  "TestGetByNickname",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	if err := dhm.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if _, err := dhm.GetByNickname(job.Nickname, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if _, err := dhm.GetByNickname(job.Nickname, true); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	// with memory cleared, lookup falls through to the stub database service
	dhm.jobs = []*DataHistoryJob{}
	if _, err := dhm.GetByNickname(job.Nickname, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	atomic.StoreInt32(&dhm.started, 0)
	if _, err := dhm.GetByNickname("test123", false); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if _, err := dhm.GetByNickname("test123", false); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestGetByID retrieves a job by UUID from memory and from the stub database,
// rejects an empty ID, and checks stopped/nil manager behaviour.
func TestGetByID(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	job := &DataHistoryJob{
		Nickname:  "TestGetByID",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	if err := dhm.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if _, err := dhm.GetByID(job.ID); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if _, err := dhm.GetByID(uuid.UUID{}); !errors.Is(err, errEmptyID) {
		t.Errorf("error '%v', expected '%v'", err, errEmptyID)
	}
	// with memory cleared, lookup falls through to the stub database service
	dhm.jobs = []*DataHistoryJob{}
	if _, err := dhm.GetByID(job.ID); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	atomic.StoreInt32(&dhm.started, 0)
	if _, err := dhm.GetByID(job.ID); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if _, err := dhm.GetByID(job.ID); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestRetrieveJobs confirms retrieveJobs returns the single upserted job and
// checks stopped/nil manager behaviour.
func TestRetrieveJobs(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	job := &DataHistoryJob{
		Nickname:  "TestRetrieveJobs",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	if err := dhm.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	jobs, err := dhm.retrieveJobs()
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(jobs) != 1 {
		t.Error("expected job")
	}
	atomic.StoreInt32(&dhm.started, 0)
	if _, err = dhm.retrieveJobs(); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if _, err = dhm.retrieveJobs(); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestGetActiveJobs verifies GetActiveJobs returns only jobs whose status is
// active, including after a job is marked failed, and checks stopped/nil
// manager behaviour.
func TestGetActiveJobs(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	jobs, err := dhm.GetActiveJobs()
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(jobs) != 0 {
		t.Error("expected 0 jobs")
	}
	job := &DataHistoryJob{
		Nickname:  "TestGetActiveJobs",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	if err = dhm.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	jobs, err = dhm.GetActiveJobs()
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(jobs) != 1 {
		t.Error("expected 1 job")
	}
	// a failed job must no longer be reported as active
	job.Status = dataHistoryStatusFailed
	jobs, err = dhm.GetActiveJobs()
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(jobs) != 0 {
		t.Error("expected 0 jobs")
	}
	atomic.StoreInt32(&dhm.started, 0)
	if _, err = dhm.GetActiveJobs(); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if _, err = dhm.GetActiveJobs(); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestValidateJob steps through each validation failure in validateJob before
// confirming a fully-populated job validates cleanly.
func TestValidateJob(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	err := m.validateJob(nil)
	if !errors.Is(err, errNilJob) {
		t.Errorf("error '%v', expected '%v'", err, errNilJob)
	}
	dhj := &DataHistoryJob{}
	err = m.validateJob(dhj)
	if !errors.Is(err, asset.ErrNotSupported) {
		t.Errorf("error '%v', expected '%v'", err, asset.ErrNotSupported)
	}
	dhj.Asset = asset.Spot
	err = m.validateJob(dhj)
	if !errors.Is(err, errCurrencyPairUnset) {
		t.Errorf("error '%v', expected '%v'", err, errCurrencyPairUnset)
	}
	// BTC-USDT is not enabled on the stub exchange; only BTC-USD is
	dhj.Exchange = testExchange
	dhj.Pair = currency.NewPair(currency.BTC, currency.USDT)
	err = m.validateJob(dhj)
	if !errors.Is(err, errCurrencyNotEnabled) {
		t.Errorf("error '%v', expected '%v'", err, errCurrencyNotEnabled)
	}
	dhj.Pair = currency.NewPair(currency.BTC, currency.USD)
	err = m.validateJob(dhj)
	if !errors.Is(err, kline.ErrUnsupportedInterval) {
		t.Errorf("error '%v', expected '%v'", err, kline.ErrUnsupportedInterval)
	}
	dhj.Interval = kline.OneMin
	err = m.validateJob(dhj)
	if !errors.Is(err, common.ErrDateUnset) {
		t.Errorf("error '%v', expected '%v'", err, common.ErrDateUnset)
	}
	// a start date in the future is invalid
	dhj.StartDate = time.Now().Add(time.Minute)
	dhj.EndDate = time.Now().Add(time.Hour)
	err = m.validateJob(dhj)
	if !errors.Is(err, common.ErrStartAfterTimeNow) {
		// previously reported errInvalidTimes as the expected error
		t.Errorf("error '%v', expected '%v'", err, common.ErrStartAfterTimeNow)
	}
	dhj.StartDate = time.Now().Add(-time.Hour)
	dhj.EndDate = time.Now().Add(-time.Minute)
	err = m.validateJob(dhj)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
}
// TestGetAllJobStatusBetween verifies jobs are returned when the query window
// overlaps the job's dates, tolerates a non-overlapping window, and checks
// stopped/nil manager behaviour.
func TestGetAllJobStatusBetween(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	dhj := &DataHistoryJob{
		// previously copy-pasted as "TestGetActiveJobs"
		Nickname:  "TestGetAllJobStatusBetween",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	err := m.UpsertJob(dhj, false)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	jobs, err := m.GetAllJobStatusBetween(time.Now().Add(-time.Minute*5), time.Now().Add(time.Minute))
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(jobs) != 1 {
		t.Error("expected 1 job")
	}
	// a window entirely before the job should still succeed
	_, err = m.GetAllJobStatusBetween(time.Now().Add(-time.Hour), time.Now().Add(-time.Minute*30))
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	m.started = 0
	_, err = m.GetAllJobStatusBetween(time.Now().Add(-time.Hour), time.Now().Add(-time.Minute*30))
	if !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	m = nil
	_, err = m.GetAllJobStatusBetween(time.Now().Add(-time.Hour), time.Now().Add(-time.Minute*30))
	if !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestPrepareJobs confirms PrepareJobs returns the single job seeded by the
// stub database service, and checks stopped/nil manager behaviour.
func TestPrepareJobs(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	jobs, err := dhm.PrepareJobs()
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(jobs) != 1 {
		t.Errorf("expected 1 job, received %v", len(jobs))
	}
	dhm.started = 0
	if _, err = dhm.PrepareJobs(); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if _, err = dhm.PrepareJobs(); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestCompareJobsToData checks compareJobsToData accepts candle and trade
// data types, rejects an unknown data type, and handles stopped/nil managers.
func TestCompareJobsToData(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	dhj := &DataHistoryJob{
		// previously copy-pasted as "TestGenerateJobSummary"
		Nickname:  "TestCompareJobsToData",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	err := m.compareJobsToData(dhj)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	dhj.DataType = dataHistoryTradeDataType
	err = m.compareJobsToData(dhj)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	// an out-of-range data type must be rejected
	dhj.DataType = 1337
	err = m.compareJobsToData(dhj)
	if !errors.Is(err, errUnknownDataType) {
		t.Errorf("error '%v', expected '%v'", err, errUnknownDataType)
	}
	m.started = 0
	err = m.compareJobsToData(dhj)
	if !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	m = nil
	err = m.compareJobsToData(dhj)
	if !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestRunJob runs a candle job and a trade job against the stubbed Binance
// exchange (including a pair that is not enabled) and checks stopped/nil
// manager behaviour.
func TestRunJob(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	dhj := &DataHistoryJob{
		// previously copy-pasted as "TestProcessJobs"
		Nickname:  "TestRunJob",
		Exchange:  "Binance",
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USDT),
		StartDate: time.Now().Add(-time.Hour * 2),
		EndDate:   time.Now(),
		Interval:  kline.OneHour,
	}
	err := m.UpsertJob(dhj, false)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	err = m.runJob(dhj)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	// DOGE-USDT is not enabled on the stub exchange; the run should still not error
	dhj.Pair = currency.NewPair(currency.DOGE, currency.USDT)
	err = m.runJob(dhj)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	dhjt := &DataHistoryJob{
		Nickname:  "TestRunJob2",
		Exchange:  "Binance",
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USDT),
		StartDate: time.Now().Add(-time.Hour * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneHour,
		DataType:  dataHistoryTradeDataType,
	}
	err = m.UpsertJob(dhjt, false)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	err = m.compareJobsToData(dhjt)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	err = m.runJob(dhjt)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	atomic.StoreInt32(&m.started, 0)
	err = m.runJob(dhjt)
	if !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	m = nil
	err = m.runJob(dhjt)
	if !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestGenerateJobSummaryTest verifies a summary with result ranges is produced
// for an upserted job, and checks stopped/nil manager behaviour.
func TestGenerateJobSummaryTest(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	job := &DataHistoryJob{
		Nickname:  "TestGenerateJobSummary",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().Add(-time.Minute * 5),
		EndDate:   time.Now(),
		Interval:  kline.OneMin,
	}
	if err := dhm.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	summary, err := dhm.GenerateJobSummary("TestGenerateJobSummary")
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if len(summary.ResultRanges) == 0 {
		t.Error("expected result ranges")
	}
	atomic.StoreInt32(&dhm.started, 0)
	if _, err = dhm.GenerateJobSummary("TestGenerateJobSummary"); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if _, err = dhm.GenerateJobSummary("TestGenerateJobSummary"); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestRunJobs covers a clean run cycle plus stopped/nil manager behaviour.
func TestRunJobs(t *testing.T) {
	t.Parallel()
	dhm := createDHM(t)
	if err := dhm.runJobs(); !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	atomic.StoreInt32(&dhm.started, 0)
	if err := dhm.runJobs(); !errors.Is(err, ErrSubSystemNotStarted) {
		t.Errorf("error '%v', expected '%v'", err, ErrSubSystemNotStarted)
	}
	dhm = nil
	if err := dhm.runJobs(); !errors.Is(err, ErrNilSubsystem) {
		t.Errorf("error '%v', expected '%v'", err, ErrNilSubsystem)
	}
}
// TestConverters round-trips a job and a job result through the database-model
// converters, asserting every field survives both directions.
func TestConverters(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	id, err := uuid.NewV4()
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	id2, err := uuid.NewV4()
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	dhj := &DataHistoryJob{
		ID:        id,
		Nickname:  "TestProcessJobs",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USDT),
		StartDate: time.Now().Add(-time.Hour * 24),
		EndDate:   time.Now(),
		Interval:  kline.OneHour,
	}
	// job -> DB model: UUID becomes a string, interval becomes nanoseconds,
	// the pair is split into base/quote strings
	dbJob := m.convertJobToDBModel(dhj)
	if dhj.ID.String() != dbJob.ID ||
		dhj.Nickname != dbJob.Nickname ||
		!dhj.StartDate.Equal(dbJob.StartDate) ||
		int64(dhj.Interval.Duration()) != dbJob.Interval ||
		dhj.Pair.Base.String() != dbJob.Base ||
		dhj.Pair.Quote.String() != dbJob.Quote {
		t.Error("expected matching job")
	}
	// DB model -> job must reconstruct the original values
	convertBack, err := m.convertDBModelToJob(dbJob)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if dhj.ID != convertBack.ID ||
		dhj.Nickname != convertBack.Nickname ||
		!dhj.StartDate.Equal(convertBack.StartDate) ||
		dhj.Interval != convertBack.Interval ||
		!dhj.Pair.Equal(convertBack.Pair) {
		t.Error("expected matching job")
	}
	jr := DataHistoryJobResult{
		ID:                id,
		JobID:             id2,
		IntervalStartDate: dhj.StartDate,
		IntervalEndDate:   dhj.EndDate,
		Status:            0,
		Result:            "test123",
		Date:              time.Now(),
	}
	// results are keyed by interval start time
	mapperino := make(map[time.Time][]DataHistoryJobResult)
	mapperino[dhj.StartDate] = append(mapperino[dhj.StartDate], jr)
	result := m.convertJobResultToDBResult(mapperino)
	if jr.ID.String() != result[0].ID ||
		jr.JobID.String() != result[0].JobID ||
		jr.Result != result[0].Result ||
		!jr.Date.Equal(result[0].Date) ||
		!jr.IntervalStartDate.Equal(result[0].IntervalStartDate) ||
		!jr.IntervalEndDate.Equal(result[0].IntervalEndDate) ||
		jr.Status != dataHistoryStatus(result[0].Status) {
		t.Error("expected matching job")
	}
	// DB results -> result map must reconstruct the original entry
	andBackAgain, err := m.convertDBResultToJobResult(result)
	if !errors.Is(err, nil) {
		t.Errorf("error '%v', expected '%v'", err, nil)
	}
	if jr.ID != andBackAgain[dhj.StartDate][0].ID ||
		jr.JobID != andBackAgain[dhj.StartDate][0].JobID ||
		jr.Result != andBackAgain[dhj.StartDate][0].Result ||
		!jr.Date.Equal(andBackAgain[dhj.StartDate][0].Date) ||
		!jr.IntervalStartDate.Equal(andBackAgain[dhj.StartDate][0].IntervalStartDate) ||
		!jr.IntervalEndDate.Equal(andBackAgain[dhj.StartDate][0].IntervalEndDate) ||
		jr.Status != andBackAgain[dhj.StartDate][0].Status {
		t.Error("expected matching job")
	}
}
// createDHM builds a DataHistoryManager for tests, wired to stub database
// services and two default-configured exchanges: testExchange with BTC-USD
// enabled, and Binance with BTC-USDT enabled.
func createDHM(t *testing.T) *DataHistoryManager {
	t.Helper()
	em := SetupExchangeManager()
	exch, err := em.NewExchangeByName(testExchange)
	if !errors.Is(err, nil) {
		// Fatalf: continuing would dereference a nil exchange below
		t.Fatalf("error '%v', expected '%v'", err, nil)
	}
	cp := currency.NewPair(currency.BTC, currency.USD)
	exch.SetDefaults()
	b := exch.GetBase()
	b.CurrencyPairs.Pairs = make(map[asset.Item]*currency.PairStore)
	b.CurrencyPairs.Pairs[asset.Spot] = &currency.PairStore{
		Available:    currency.Pairs{cp},
		Enabled:      currency.Pairs{cp},
		AssetEnabled: convert.BoolPtr(true)}
	em.Add(exch)
	exch2, err := em.NewExchangeByName("Binance")
	if !errors.Is(err, nil) {
		// Fatalf: continuing would dereference a nil exchange below
		t.Fatalf("error '%v', expected '%v'", err, nil)
	}
	cp2 := currency.NewPair(currency.BTC, currency.USDT)
	exch2.SetDefaults()
	b = exch2.GetBase()
	b.CurrencyPairs.Pairs = make(map[asset.Item]*currency.PairStore)
	b.CurrencyPairs.Pairs[asset.Spot] = &currency.PairStore{
		Available:    currency.Pairs{cp2},
		Enabled:      currency.Pairs{cp2},
		AssetEnabled: convert.BoolPtr(true),
		ConfigFormat: &currency.PairFormat{Uppercase: true}}
	em.Add(exch2)
	m := &DataHistoryManager{
		jobDB:           dataHistoryJobService{},
		jobResultDB:     dataHistoryJobResultService{},
		started:         1,
		exchangeManager: em,
		tradeLoader:     dataHistoryTradeLoader,
		candleLoader:    dataHistoryCandleLoader,
		interval:        time.NewTicker(time.Minute),
	}
	return m
}
// these structs and function implementations are used
// to override database implementations as we are not testing those
// results here. see tests in the database folder

// dataHistoryJobService is a stub job database service; it embeds the
// interface and overrides only the methods the manager calls.
type dataHistoryJobService struct {
	datahistoryjob.IDBService
}

// dataHistoryJobResultService is a stub job-result database service; it embeds
// the interface and overrides only the methods the manager calls.
type dataHistoryJobResultService struct {
	datahistoryjobresult.IDBService
}
var (
	// jobID is the canned UUID returned by the stub services
	jobID = "00a434e2-8502-4d6b-865f-e4243fd8b5a7"
	// startDate/endDate bound the canned job fixture below
	// NOTE(review): time.Local makes these depend on the host timezone — confirm intended
	startDate = time.Date(2020, 1, 1, 0, 0, 0, 0, time.Local)
	endDate   = time.Date(2021, 1, 1, 0, 0, 0, 0, time.Local)
)
// Upsert is a no-op stub; jobs are not persisted during tests.
func (d dataHistoryJobService) Upsert(_ ...*datahistoryjob.DataHistoryJob) error {
	return nil
}

// GetByNickName returns a copy of the canned job fixture with the requested nickname.
func (d dataHistoryJobService) GetByNickName(nickname string) (*datahistoryjob.DataHistoryJob, error) {
	jc := j
	jc.Nickname = nickname
	return &jc, nil
}

// GetJobsBetween returns a single-element slice containing the canned job fixture.
func (d dataHistoryJobService) GetJobsBetween(_, _ time.Time) ([]datahistoryjob.DataHistoryJob, error) {
	jc := j
	return []datahistoryjob.DataHistoryJob{jc}, nil
}

// GetByID returns a copy of the canned job fixture with the requested ID.
func (d dataHistoryJobService) GetByID(id string) (*datahistoryjob.DataHistoryJob, error) {
	jc := j
	jc.ID = id
	return &jc, nil
}

// GetAllIncompleteJobsAndResults returns a single-element slice containing the canned job fixture.
func (d dataHistoryJobService) GetAllIncompleteJobsAndResults() ([]datahistoryjob.DataHistoryJob, error) {
	jc := j
	return []datahistoryjob.DataHistoryJob{jc}, nil
}

// GetJobAndAllResults returns a copy of the canned job fixture with the requested nickname.
func (d dataHistoryJobService) GetJobAndAllResults(nickname string) (*datahistoryjob.DataHistoryJob, error) {
	jc := j
	jc.Nickname = nickname
	return &jc, nil
}

// Upsert is a no-op stub; job results are not persisted during tests.
func (d dataHistoryJobResultService) Upsert(_ ...*datahistoryjobresult.DataHistoryJobResult) error {
	return nil
}

// GetByJobID is a stub returning no results.
func (d dataHistoryJobResultService) GetByJobID(_ string) ([]datahistoryjobresult.DataHistoryJobResult, error) {
	return nil, nil
}

// GetJobResultsBetween is a stub returning no results.
func (d dataHistoryJobResultService) GetJobResultsBetween(_ string, _, _ time.Time) ([]datahistoryjobresult.DataHistoryJobResult, error) {
	return nil, nil
}
// j is the canned database-model job returned (as a copy) by the stub
// dataHistoryJobService methods above.
var j = datahistoryjob.DataHistoryJob{
	ID:               jobID,
	Nickname:         "datahistoryjob",
	ExchangeName:     testExchange,
	Asset:            "spot",
	Base:             "btc",
	Quote:            "usd",
	StartDate:        startDate,
	EndDate:          endDate,
	Interval:         int64(kline.OneHour.Duration()),
	RequestSizeLimit: 3,
	MaxRetryAttempts: 3,
	BatchSize:        3,
	CreatedDate:      endDate,
	Status:           0, // active
	Results: []*datahistoryjobresult.DataHistoryJobResult{
		{
			ID:    jobID,
			JobID: jobID,
		},
	},
}
// dataHistoryTradeLoader is a stub trade loader that marks every interval in
// the supplied range holder as having data.
func dataHistoryTradeLoader(_, _, _, _ string, irh *kline.IntervalRangeHolder) error {
	// loop variables renamed from i/j: j shadowed the package-level fixture `j`
	for x := range irh.Ranges {
		for y := range irh.Ranges[x].Intervals {
			irh.Ranges[x].Intervals[y].HasData = true
		}
	}
	return nil
}
// dataHistoryCandleLoader is a stub candle loader returning an empty kline item.
func dataHistoryCandleLoader(string, currency.Pair, asset.Item, kline.Interval, time.Time, time.Time) (kline.Item, error) {
	return kline.Item{}, nil
}

View File

@@ -0,0 +1,163 @@
package engine
import (
"errors"
"sync"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjob"
"github.com/thrasher-corp/gocryptotrader/database/repository/datahistoryjobresult"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/kline"
)
// dataHistoryManagerName is the subsystem's registered name.
const dataHistoryManagerName = "data_history_manager"

// dataHistoryStatus describes the state of a job or job result.
type dataHistoryStatus int64

// dataHistoryDataType describes which kind of data a job fetches.
type dataHistoryDataType int64
// Data type descriptors
const (
	dataHistoryCandleDataType dataHistoryDataType = iota // 0: candle/OHLCV data
	dataHistoryTradeDataType                             // 1: trade data
)
// DataHistoryJob status descriptors
const (
	dataHistoryStatusActive        dataHistoryStatus = iota // 0
	dataHistoryStatusFailed                                 // 1
	dataHistoryStatusComplete                               // 2
	dataHistoryStatusRemoved                                // 3
	dataHistoryIntervalMissingData                          // 4
)
// String stringifies iotas to readable
func (d dataHistoryStatus) String() string {
	// switch directly on the value instead of a boolean cascade
	switch d {
	case 0:
		return "active"
	case 1:
		return "failed"
	case 2:
		return "complete"
	case 3:
		return "removed"
	case 4:
		return "missing data"
	}
	return ""
}
// Valid ensures the value set is legitimate
func (d dataHistoryStatus) Valid() bool {
	// valid statuses occupy the contiguous range 0..4
	return d >= 0 && d <= 4
}
// String stringifies iotas to readable
func (d dataHistoryDataType) String() string {
	// switch directly on the value instead of a boolean cascade
	switch d {
	case 0:
		return "candles"
	case 1:
		return "trades"
	}
	return ""
}
// Valid ensures the value set is legitimate
func (d dataHistoryDataType) Valid() bool {
	// only the candle (0) and trade (1) data types exist
	return d == 0 || d == 1
}
var (
	errJobNotFound                = errors.New("job not found")
	errUnknownDataType            = errors.New("job has invalid datatype set and cannot be processed")
	errNilJob                     = errors.New("nil job received")
	errNicknameIDUnset            = errors.New("must set 'id' OR 'nickname'")
	errEmptyID                    = errors.New("id not set")
	errOnlyNicknameOrID           = errors.New("can only set 'id' OR 'nickname'")
	errNicknameInUse              = errors.New("cannot continue as nickname already in use")
	errNicknameUnset              = errors.New("cannot continue as nickname unset")
	errJobInvalid                 = errors.New("job has not been setup properly and cannot be processed")
	errInvalidDataHistoryStatus   = errors.New("unsupported data history status received")
	errInvalidDataHistoryDataType = errors.New("unsupported data history data type received")
	errCanOnlyDeleteActiveJobs    = errors.New("can only delete active jobs")
	// defaultDataHistoryTradeInterval is the default interval size used to verify whether there is any database data
	// for a trade job
	defaultDataHistoryTradeInterval = kline.FifteenMin
	// defaults applied to jobs that do not specify their own limits
	defaultDataHistoryMaxJobsPerCycle  int64 = 5
	defaultDataHistoryBatchLimit       int64 = 3
	defaultDataHistoryRetryAttempts    int64 = 3
	defaultDataHistoryRequestSizeLimit int64 = 10
	// defaultDataHistoryTicker is the default interval between processing cycles
	defaultDataHistoryTicker = time.Minute
)
// DataHistoryManager is responsible for synchronising,
// retrieving and saving candle and trade data from loaded jobs
type DataHistoryManager struct {
	exchangeManager            iExchangeManager
	databaseConnectionInstance database.IDatabase
	started                    int32 // accessed atomically; 1 when running
	processing                 int32 // accessed atomically; guards against overlapping run cycles
	shutdown                   chan struct{}
	interval                   *time.Ticker // drives periodic job processing
	jobs                       []*DataHistoryJob
	m                          sync.Mutex // presumably guards jobs — confirm against methods outside this view
	jobDB                      datahistoryjob.IDBService
	jobResultDB                datahistoryjobresult.IDBService
	maxJobsPerCycle            int64
	verbose                    bool
	// tradeLoader and candleLoader are injectable for testing
	tradeLoader  func(string, string, string, string, *kline.IntervalRangeHolder) error
	candleLoader func(string, currency.Pair, asset.Item, kline.Interval, time.Time, time.Time) (kline.Item, error)
}
// DataHistoryJob used to gather candle/trade history and save
// to the database
type DataHistoryJob struct {
	ID               uuid.UUID
	Nickname         string // unique human-friendly identifier
	Exchange         string
	Asset            asset.Item
	Pair             currency.Pair
	StartDate        time.Time
	EndDate          time.Time
	Interval         kline.Interval
	RunBatchLimit    int64 // number of batches processed per cycle
	RequestSizeLimit int64 // number of intervals requested per batch
	DataType         dataHistoryDataType
	MaxRetryAttempts int64
	Status           dataHistoryStatus
	CreatedDate      time.Time
	// Results holds job results keyed by interval start time
	Results map[time.Time][]DataHistoryJobResult
	// rangeHolder tracks which intervals already have data
	rangeHolder *kline.IntervalRangeHolder
}
// DataHistoryJobResult contains details on
// the result of a history request
type DataHistoryJobResult struct {
	ID                uuid.UUID
	JobID             uuid.UUID // the job this result belongs to
	IntervalStartDate time.Time
	IntervalEndDate   time.Time
	Status            dataHistoryStatus
	Result            string // error details when the request failed
	Date              time.Time
}
// DataHistoryJobSummary is a human readable summary of the job
// for quickly understanding the status of a given job
type DataHistoryJobSummary struct {
	Nickname  string
	Exchange  string
	Asset     asset.Item
	Pair      currency.Pair
	StartDate time.Time
	EndDate   time.Time
	Interval  kline.Interval
	Status    dataHistoryStatus
	DataType  dataHistoryDataType
	// ResultRanges lists human readable descriptions of processed ranges
	ResultRanges []string
}

View File

@@ -44,6 +44,7 @@ type Engine struct {
gctScriptManager *gctscript.GctScriptManager
websocketRoutineManager *websocketRoutineManager
WithdrawManager *WithdrawManager
dataHistoryManager *DataHistoryManager
Settings Settings
uptime time.Time
ServicesWG sync.WaitGroup
@@ -139,6 +140,8 @@ func loadConfigWithSettings(settings *Settings, flagSet map[string]bool) (*confi
func validateSettings(b *Engine, s *Settings, flagSet map[string]bool) {
b.Settings = *s
b.Settings.EnableDataHistoryManager = (flagSet["datahistorymanager"] && b.Settings.EnableDatabaseManager) || b.Config.DataHistoryManager.Enabled
b.Settings.EnableGCTScriptManager = b.Settings.EnableGCTScriptManager &&
(flagSet["gctscriptmanager"] || b.Config.GCTScript.Enabled)
@@ -205,7 +208,7 @@ func validateSettings(b *Engine, s *Settings, flagSet map[string]bool) {
if b.Settings.GlobalHTTPTimeout <= 0 {
b.Settings.GlobalHTTPTimeout = b.Config.GlobalHTTPTimeout
}
common.HTTPClient = common.NewHTTPClientWithTimeout(b.Settings.GlobalHTTPTimeout)
common.SetHTTPClientWithTimeout(b.Settings.GlobalHTTPTimeout)
if b.Settings.GlobalHTTPUserAgent != "" {
common.HTTPUserAgent = b.Settings.GlobalHTTPUserAgent
@@ -223,6 +226,7 @@ func PrintSettings(s *Settings) {
gctlog.Debugf(gctlog.Global, "\t Enable all pairs: %v", s.EnableAllPairs)
gctlog.Debugf(gctlog.Global, "\t Enable coinmarketcap analaysis: %v", s.EnableCoinmarketcapAnalysis)
gctlog.Debugf(gctlog.Global, "\t Enable portfolio manager: %v", s.EnablePortfolioManager)
gctlog.Debugf(gctlog.Global, "\t Enable data history manager: %v", s.EnableDataHistoryManager)
gctlog.Debugf(gctlog.Global, "\t Portfolio manager sleep delay: %v\n", s.PortfolioManagerDelay)
gctlog.Debugf(gctlog.Global, "\t Enable gPRC: %v", s.EnableGRPC)
gctlog.Debugf(gctlog.Global, "\t Enable gRPC Proxy: %v", s.EnableGRPCProxy)
@@ -423,6 +427,20 @@ func (bot *Engine) Start() error {
}
}
if bot.Settings.EnableDataHistoryManager {
if bot.dataHistoryManager == nil {
bot.dataHistoryManager, err = SetupDataHistoryManager(bot.ExchangeManager, bot.DatabaseManager, &bot.Config.DataHistoryManager)
if err != nil {
gctlog.Errorf(gctlog.Global, "database history manager unable to setup: %s", err)
} else {
err = bot.dataHistoryManager.Start()
if err != nil {
gctlog.Errorf(gctlog.Global, "database history manager unable to start: %s", err)
}
}
}
}
bot.WithdrawManager, err = SetupWithdrawManager(bot.ExchangeManager, bot.portfolioManager, bot.Settings.EnableDryRun)
if err != nil {
return err
@@ -610,6 +628,12 @@ func (bot *Engine) Stop() {
}
}
if bot.dataHistoryManager.IsRunning() {
if err := bot.dataHistoryManager.Stop(); err != nil {
gctlog.Errorf(gctlog.DataHistory, "data history manager unable to stop. Error: %v", err)
}
}
if bot.DatabaseManager.IsRunning() {
if err := bot.DatabaseManager.Stop(); err != nil {
gctlog.Errorf(gctlog.Global, "Database manager unable to stop. Error: %v", err)

View File

@@ -76,14 +76,31 @@ func TestLoadConfigWithSettings(t *testing.T) {
func TestStartStopDoesNotCausePanic(t *testing.T) {
t.Parallel()
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Problem creating temp dir at %s: %s\n", tempDir, err)
}
defer func() {
err = os.RemoveAll(tempDir)
if err != nil {
t.Error(err)
}
}()
botOne, err := NewFromSettings(&Settings{
ConfigFile: config.TestFile,
EnableDryRun: true,
DataDir: tempDir,
}, nil)
if err != nil {
t.Error(err)
}
botOne.Settings.EnableGRPCProxy = false
for i := range botOne.Config.Exchanges {
if botOne.Config.Exchanges[i].Name != testExchange {
// there is no need to load all exchanges for this test
botOne.Config.Exchanges[i].Enabled = false
}
}
if err = botOne.Start(); err != nil {
t.Error(err)
}

View File

@@ -20,6 +20,7 @@ type Settings struct {
EnableAllPairs bool
EnableCoinmarketcapAnalysis bool
EnablePortfolioManager bool
EnableDataHistoryManager bool
PortfolioManagerDelay time.Duration
EnableGRPC bool
EnableGRPCProxy bool

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Event_manager
# GoCryptoTrader package Event manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Event_manager
## Current Features for Event manager
+ The event manager subsystem is used to push events to communication systems such as Slack
+ The only configurable aspects of the event manager are the delays between receiving an event and pushing it and enabling verbose:

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Exchange_manager
# GoCryptoTrader package Exchange manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Exchange_manager
## Current Features for Exchange manager
+ The exchange manager subsystem is used to load and store exchanges so that the engine Bot can use them to track orderbooks, submit orders etc.
+ The exchange manager itself is not customisable, it is always enabled.
+ The exchange manager by default will load all exchanges that are enabled in your config, however, it will also load exchanges by request via GRPC commands

View File

@@ -55,6 +55,7 @@ func (bot *Engine) GetSubsystemsStatus() map[string]bool {
systems[DeprecatedName] = bot.Settings.EnableDeprecatedRPC
systems[WebsocketName] = bot.Settings.EnableWebsocketRPC
systems[dispatch.Name] = dispatch.IsRunning()
systems[dataHistoryManagerName] = bot.dataHistoryManager.IsRunning()
return systems
}
@@ -226,7 +227,17 @@ func (bot *Engine) SetSubsystem(subSystemName string, enable bool) error {
return bot.apiServer.StopWebsocketServer()
case grpcName, grpcProxyName:
return errors.New("cannot manage GRPC subsystem via GRPC. Please manually change your config")
case dataHistoryManagerName:
if enable {
if bot.dataHistoryManager == nil {
bot.dataHistoryManager, err = SetupDataHistoryManager(bot.ExchangeManager, bot.DatabaseManager, &bot.Config.DataHistoryManager)
if err != nil {
return err
}
}
return bot.dataHistoryManager.Start()
}
return bot.dataHistoryManager.Stop()
case vm.Name:
if enable {
if bot.gctScriptManager == nil {

View File

@@ -121,20 +121,10 @@ func TestGetAuthAPISupportedExchanges(t *testing.T) {
}
exch := e.ExchangeManager.GetExchangeByName(testExchange)
cfg, err := exch.GetDefaultConfig()
if err != nil {
t.Error(err)
}
cfg.Enabled = true
cfg.API.AuthenticatedSupport = true
cfg.API.AuthenticatedWebsocketSupport = true
cfg.API.Credentials.Key = "test"
cfg.API.Credentials.Secret = "test"
cfg.WebsocketTrafficTimeout = time.Minute
err = exch.Setup(cfg)
if err != nil {
t.Error(err)
}
b := exch.GetBase()
b.API.AuthenticatedWebsocketSupport = true
b.API.Credentials.Key = "test"
b.API.Credentials.Secret = "test"
if result := e.GetAuthAPISupportedExchanges(); len(result) != 1 {
t.Fatal("Unexpected result", result)
}

View File

@@ -123,7 +123,7 @@ func (m *ntpManager) FetchNTPTime() (time.Time, error) {
if atomic.LoadInt32(&m.started) == 0 {
return time.Time{}, fmt.Errorf("NTP manager %w", ErrSubSystemNotStarted)
}
return checkTimeInPools(m.pools), nil
return m.checkTimeInPools(), nil
}
// processTime determines the difference between system time and NTP time
@@ -154,11 +154,11 @@ func (m *ntpManager) processTime() error {
// checkTimeInPools returns local based on ntp servers provided timestamp
// if no server can be reached will return local time in UTC()
func checkTimeInPools(pool []string) time.Time {
for i := range pool {
con, err := net.DialTimeout("udp", pool[i], 5*time.Second)
func (m *ntpManager) checkTimeInPools() time.Time {
for i := range m.pools {
con, err := net.DialTimeout("udp", m.pools[i], 5*time.Second)
if err != nil {
log.Warnf(log.TimeMgr, "Unable to connect to hosts %v attempting next", pool[i])
log.Warnf(log.TimeMgr, "Unable to connect to hosts %v attempting next", m.pools[i])
continue
}

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Ntp_manager
# GoCryptoTrader package Ntp manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Ntp_manager
## Current Features for Ntp manager
+ The NTP manager subsystem is used to highlight discrepancies between your system time and specified NTP server times
+ It is useful for debugging and understanding why a request to an exchange may be rejected
+ The NTP manager cannot update your system clock, so when it does alert you of issues, you must take it upon yourself to change your system time in the event your requests are being rejected for being too far out of sync

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Order_manager
# GoCryptoTrader package Order manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Order_manager
## Current Features for Order manager
+ The order manager subsystem stores and monitors all orders from enabled exchanges with API keys and `authenticatedSupport` enabled
+ It can be enabled or disabled via runtime command `-ordermanager=false` and defaults to true
+ All orders placed via GoCryptoTrader will be added to the order manager store

View File

@@ -30,6 +30,7 @@ type portfolioManager struct {
exchangeManager *ExchangeManager
shutdown chan struct{}
base *portfolio.Base
m sync.Mutex
}
// setupPortfolioManager creates a new portfolio manager
@@ -122,6 +123,8 @@ func (m *portfolioManager) processPortfolio() {
if !atomic.CompareAndSwapInt32(&m.processing, 0, 1) {
return
}
m.m.Lock()
defer m.m.Unlock()
data := m.base.GetPortfolioGroupedCoin()
for key, value := range data {
err := m.base.UpdatePortfolio(value, key)
@@ -270,6 +273,8 @@ func (m *portfolioManager) AddAddress(address, description string, coinType curr
if !m.IsRunning() {
return fmt.Errorf("portfolio manager %w", ErrSubSystemNotStarted)
}
m.m.Lock()
defer m.m.Unlock()
return m.base.AddAddress(address, description, coinType, balance)
}
@@ -281,6 +286,8 @@ func (m *portfolioManager) RemoveAddress(address, description string, coinType c
if !m.IsRunning() {
return fmt.Errorf("portfolio manager %w", ErrSubSystemNotStarted)
}
m.m.Lock()
defer m.m.Unlock()
return m.base.RemoveAddress(address, description, coinType)
}

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Portfolio_manager
# GoCryptoTrader package Portfolio manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Portfolio_manager
## Current Features for Portfolio manager
+ The portfolio manager subsystem is used to synchronise and monitor wallet addresses
+ It can read addresses specified in your config file
+ If you have set API keys for an enabled exchange and enabled `authenticatedSupport`, it will store your exchange addresses

View File

@@ -63,6 +63,7 @@ var (
errCurrencyNotEnabled = errors.New("currency not enabled")
errCurrencyPairInvalid = errors.New("currency provided is not found in the available pairs list")
errNoTrades = errors.New("no trades returned from supplied params")
errNilRequestData = errors.New("nil request data received, cannot continue")
)
// RPCServer struct
@@ -873,8 +874,9 @@ func (s *RPCServer) GetOrders(_ context.Context, r *gctrpc.GetOrdersRequest) (*g
return nil, err
}
}
if !start.IsZero() && !end.IsZero() && start.After(end) {
return nil, errInvalidTimes
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
request := &order.GetOrdersRequest{
@@ -1513,17 +1515,20 @@ func (s *RPCServer) WithdrawalEventsByExchange(_ context.Context, r *gctrpc.With
// WithdrawalEventsByDate returns previous withdrawal request details by exchange
func (s *RPCServer) WithdrawalEventsByDate(_ context.Context, r *gctrpc.WithdrawalEventsByDateRequest) (*gctrpc.WithdrawalEventsByExchangeResponse, error) {
UTCStartTime, err := time.Parse(common.SimpleTimeFormat, r.Start)
start, err := time.Parse(common.SimpleTimeFormat, r.Start)
if err != nil {
return nil, err
return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
var UTCEndTime time.Time
UTCEndTime, err = time.Parse(common.SimpleTimeFormat, r.End)
end, err := time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
var ret []*withdraw.Response
ret, err = s.WithdrawManager.WithdrawEventByDate(r.Exchange, UTCStartTime, UTCEndTime, int(r.Limit))
ret, err = s.WithdrawManager.WithdrawEventByDate(r.Exchange, start, end, int(r.Limit))
if err != nil {
return nil, err
}
@@ -1894,16 +1899,19 @@ func (s *RPCServer) GetExchangeTickerStream(r *gctrpc.GetExchangeTickerStreamReq
// GetAuditEvent returns matching audit events from database
func (s *RPCServer) GetAuditEvent(_ context.Context, r *gctrpc.GetAuditEventRequest) (*gctrpc.GetAuditEventResponse, error) {
UTCStartTime, err := time.Parse(common.SimpleTimeFormat, r.StartDate)
start, err := time.Parse(common.SimpleTimeFormat, r.StartDate)
if err != nil {
return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
end, err := time.Parse(common.SimpleTimeFormat, r.EndDate)
if err != nil {
return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
UTCEndTime, err := time.Parse(common.SimpleTimeFormat, r.EndDate)
if err != nil {
return nil, err
}
events, err := audit.GetEvent(UTCStartTime, UTCEndTime, r.OrderBy, int(r.Limit))
events, err := audit.GetEvent(start, end, r.OrderBy, int(r.Limit))
if err != nil {
return nil, err
}
@@ -1939,19 +1947,18 @@ func (s *RPCServer) GetAuditEvent(_ context.Context, r *gctrpc.GetAuditEventRequ
// GetHistoricCandles returns historical candles for a given exchange
func (s *RPCServer) GetHistoricCandles(_ context.Context, r *gctrpc.GetHistoricCandlesRequest) (*gctrpc.GetHistoricCandlesResponse, error) {
UTCStartTime, err := time.Parse(common.SimpleTimeFormat, r.Start)
start, err := time.Parse(common.SimpleTimeFormat, r.Start)
if err != nil {
return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
end, err := time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
var UTCEndTime time.Time
UTCEndTime, err = time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, err
}
if UTCStartTime.After(UTCEndTime) || UTCStartTime.Equal(UTCEndTime) {
return nil, errInvalidTimes
}
if r.Pair == nil {
return nil, errCurrencyPairUnset
}
@@ -1988,8 +1995,8 @@ func (s *RPCServer) GetHistoricCandles(_ context.Context, r *gctrpc.GetHistoricC
pair,
a,
interval,
UTCStartTime,
UTCEndTime)
start,
end)
if err != nil {
return nil, err
}
@@ -1997,14 +2004,14 @@ func (s *RPCServer) GetHistoricCandles(_ context.Context, r *gctrpc.GetHistoricC
if r.ExRequest {
klineItem, err = exch.GetHistoricCandlesExtended(pair,
a,
UTCStartTime,
UTCEndTime,
start,
end,
interval)
} else {
klineItem, err = exch.GetHistoricCandles(pair,
a,
UTCStartTime,
UTCEndTime,
start,
end,
interval)
}
}
@@ -2015,7 +2022,7 @@ func (s *RPCServer) GetHistoricCandles(_ context.Context, r *gctrpc.GetHistoricC
if r.FillMissingWithTrades {
var tradeDataKline *kline.Item
tradeDataKline, err = fillMissingCandlesWithStoredTrades(UTCStartTime, UTCEndTime, &klineItem)
tradeDataKline, err = fillMissingCandlesWithStoredTrades(start, end, &klineItem)
if err != nil {
return nil, err
}
@@ -2680,17 +2687,20 @@ func (s *RPCServer) GetSavedTrades(_ context.Context, r *gctrpc.GetSavedTradesRe
return nil, err
}
var UTCStartTime, UTCEndTime time.Time
UTCStartTime, err = time.Parse(common.SimpleTimeFormat, r.Start)
start, err := time.Parse(common.SimpleTimeFormat, r.Start)
if err != nil {
return nil, err
return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
UTCEndTime, err = time.Parse(common.SimpleTimeFormat, r.End)
end, err := time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
var trades []trade.Data
trades, err = trade.GetTradesInRange(r.Exchange, r.AssetType, r.Pair.Base, r.Pair.Quote, UTCStartTime, UTCEndTime)
trades, err = trade.GetTradesInRange(r.Exchange, r.AssetType, r.Pair.Base, r.Pair.Quote, start, end)
if err != nil {
return nil, err
}
@@ -2720,12 +2730,15 @@ func (s *RPCServer) ConvertTradesToCandles(_ context.Context, r *gctrpc.ConvertT
if r.End == "" || r.Start == "" || r.Exchange == "" || r.Pair == nil || r.AssetType == "" || r.Pair.String() == "" || r.TimeInterval == 0 {
return nil, errInvalidArguments
}
UTCStartTime, err := time.Parse(common.SimpleTimeFormat, r.Start)
start, err := time.Parse(common.SimpleTimeFormat, r.Start)
if err != nil {
return nil, err
return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
var UTCEndTime time.Time
UTCEndTime, err = time.Parse(common.SimpleTimeFormat, r.End)
end, err := time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
@@ -2747,7 +2760,7 @@ func (s *RPCServer) ConvertTradesToCandles(_ context.Context, r *gctrpc.ConvertT
}
var trades []trade.Data
trades, err = trade.GetTradesInRange(r.Exchange, r.AssetType, r.Pair.Base, r.Pair.Quote, UTCStartTime, UTCEndTime)
trades, err = trade.GetTradesInRange(r.Exchange, r.AssetType, r.Pair.Base, r.Pair.Quote, start, end)
if err != nil {
return nil, err
}
@@ -2814,12 +2827,15 @@ func (s *RPCServer) FindMissingSavedCandleIntervals(_ context.Context, r *gctrpc
return nil, err
}
var UTCStartTime, UTCEndTime time.Time
UTCStartTime, err = time.Parse(common.SimpleTimeFormat, r.Start)
start, err := time.Parse(common.SimpleTimeFormat, r.Start)
if err != nil {
return nil, err
return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
UTCEndTime, err = time.Parse(common.SimpleTimeFormat, r.End)
end, err := time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
@@ -2828,8 +2844,8 @@ func (s *RPCServer) FindMissingSavedCandleIntervals(_ context.Context, r *gctrpc
p,
a,
kline.Interval(r.Interval),
UTCStartTime,
UTCEndTime,
start,
end,
)
if err != nil {
return nil, err
@@ -2845,7 +2861,7 @@ func (s *RPCServer) FindMissingSavedCandleIntervals(_ context.Context, r *gctrpc
candleTimes = append(candleTimes, klineItem.Candles[i].Time)
}
var ranges []timeperiods.TimeRange
ranges, err = timeperiods.FindTimeRangesContainingData(UTCStartTime, UTCEndTime, klineItem.Interval.Duration(), candleTimes)
ranges, err = timeperiods.FindTimeRangesContainingData(start, end, klineItem.Interval.Duration(), candleTimes)
if err != nil {
return nil, err
}
@@ -2870,8 +2886,8 @@ func (s *RPCServer) FindMissingSavedCandleIntervals(_ context.Context, r *gctrpc
resp.Status = fmt.Sprintf("Found %v candles. Missing %v candles in requested timeframe starting %v ending %v",
foundCount,
len(resp.MissingPeriods),
UTCStartTime.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone),
UTCEndTime.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone))
start.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone),
end.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone))
}
return resp, nil
@@ -2898,22 +2914,24 @@ func (s *RPCServer) FindMissingSavedTradeIntervals(_ context.Context, r *gctrpc.
if err != nil {
return nil, err
}
var UTCStartTime, UTCEndTime time.Time
UTCStartTime, err = time.Parse(common.SimpleTimeFormat, r.Start)
start, err := time.Parse(common.SimpleTimeFormat, r.Start)
if err != nil {
return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
end, err := time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return nil, err
}
UTCStartTime = UTCStartTime.Truncate(time.Hour)
UTCEndTime, err = time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return nil, err
}
UTCEndTime = UTCEndTime.Truncate(time.Hour)
start = start.Truncate(time.Hour)
end = end.Truncate(time.Hour)
intervalMap := make(map[time.Time]bool)
iterationTime := UTCStartTime
for iterationTime.Before(UTCEndTime) {
iterationTime := start
for iterationTime.Before(end) {
intervalMap[iterationTime] = false
iterationTime = iterationTime.Add(time.Hour)
}
@@ -2924,8 +2942,8 @@ func (s *RPCServer) FindMissingSavedTradeIntervals(_ context.Context, r *gctrpc.
r.AssetType,
r.Pair.Base,
r.Pair.Quote,
UTCStartTime,
UTCEndTime,
start,
end,
)
if err != nil {
return nil, err
@@ -2941,7 +2959,7 @@ func (s *RPCServer) FindMissingSavedTradeIntervals(_ context.Context, r *gctrpc.
tradeTimes = append(tradeTimes, trades[i].Timestamp)
}
var ranges []timeperiods.TimeRange
ranges, err = timeperiods.FindTimeRangesContainingData(UTCStartTime, UTCEndTime, time.Hour, tradeTimes)
ranges, err = timeperiods.FindTimeRangesContainingData(start, end, time.Hour, tradeTimes)
if err != nil {
return nil, err
}
@@ -2966,8 +2984,8 @@ func (s *RPCServer) FindMissingSavedTradeIntervals(_ context.Context, r *gctrpc.
resp.Status = fmt.Sprintf("Found %v periods. Missing %v periods between %v and %v",
foundCount,
len(resp.MissingPeriods),
UTCStartTime.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone),
UTCEndTime.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone))
start.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone),
end.In(time.UTC).Format(common.SimpleTimeFormatWithTimezone))
}
return resp, nil
@@ -3009,13 +3027,15 @@ func (s *RPCServer) GetHistoricTrades(r *gctrpc.GetSavedTradesRequest, stream gc
return err
}
var trades []trade.Data
var UTCStartTime, UTCEndTime time.Time
UTCStartTime, err = time.Parse(common.SimpleTimeFormat, r.Start)
start, err := time.Parse(common.SimpleTimeFormat, r.Start)
if err != nil {
return err
return fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
}
UTCEndTime, err = time.Parse(common.SimpleTimeFormat, r.End)
end, err := time.Parse(common.SimpleTimeFormat, r.End)
if err != nil {
return fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
}
err = common.StartEndTimeCheck(start, end)
if err != nil {
return err
}
@@ -3025,7 +3045,7 @@ func (s *RPCServer) GetHistoricTrades(r *gctrpc.GetSavedTradesRequest, stream gc
Pair: r.Pair,
}
for iterateStartTime := UTCStartTime; iterateStartTime.Before(UTCEndTime); iterateStartTime = iterateStartTime.Add(time.Hour) {
for iterateStartTime := start; iterateStartTime.Before(end); iterateStartTime = iterateStartTime.Add(time.Hour) {
iterateEndTime := iterateStartTime.Add(time.Hour)
trades, err = exch.GetHistoricTrades(cp, a, iterateStartTime, iterateEndTime)
if err != nil {
@@ -3041,7 +3061,7 @@ func (s *RPCServer) GetHistoricTrades(r *gctrpc.GetSavedTradesRequest, stream gc
}
for i := range trades {
tradeTS := trades[i].Timestamp.In(time.UTC)
if tradeTS.After(UTCEndTime) {
if tradeTS.After(end) {
break
}
grpcTrades.Trades = append(grpcTrades.Trades, &gctrpc.SavedTrades{
@@ -3282,3 +3302,270 @@ func parseSingleEvents(ret *withdraw.Response) *gctrpc.WithdrawalEventsByExchang
Event: []*gctrpc.WithdrawalEventResponse{tempEvent},
}
}
// UpsertDataHistoryJob adds or updates a data history job for the data history
// manager. It will upsert the entry in the database and allow for the
// processing of the job.
func (s *RPCServer) UpsertDataHistoryJob(_ context.Context, r *gctrpc.UpsertDataHistoryJobRequest) (*gctrpc.UpsertDataHistoryJobResponse, error) {
	if r == nil {
		return nil, errNilRequestData
	}
	// Guard against a nil pair before it is dereferenced below; without this a
	// malformed request would panic the server.
	if r.Pair == nil {
		return nil, errCurrencyPairUnset
	}
	a, err := asset.New(r.Asset)
	if err != nil {
		return nil, err
	}
	p := currency.Pair{
		Delimiter: r.Pair.Delimiter,
		Base:      currency.NewCode(r.Pair.Base),
		Quote:     currency.NewCode(r.Pair.Quote),
	}
	// Validate the exchange/asset/pair combination before doing any parsing work.
	e := s.GetExchangeByName(r.Exchange)
	err = checkParams(r.Exchange, e, a, p)
	if err != nil {
		return nil, err
	}
	start, err := time.Parse(common.SimpleTimeFormat, r.StartDate)
	if err != nil {
		return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
	}
	end, err := time.Parse(common.SimpleTimeFormat, r.EndDate)
	if err != nil {
		return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
	}
	err = common.StartEndTimeCheck(start, end)
	if err != nil {
		return nil, err
	}
	// New or updated jobs always (re)start in the active state.
	job := DataHistoryJob{
		Nickname:         r.Nickname,
		Exchange:         r.Exchange,
		Asset:            a,
		Pair:             p,
		StartDate:        start,
		EndDate:          end,
		Interval:         kline.Interval(r.Interval),
		RunBatchLimit:    r.BatchSize,
		RequestSizeLimit: r.RequestSizeLimit,
		DataType:         dataHistoryDataType(r.DataType),
		Status:           dataHistoryStatusActive,
		MaxRetryAttempts: r.MaxRetryAttempts,
	}
	err = s.dataHistoryManager.UpsertJob(&job, r.InsertOnly)
	if err != nil {
		return nil, err
	}
	// Re-read the stored job so the response carries the database-assigned ID.
	result, err := s.dataHistoryManager.GetByNickname(r.Nickname, false)
	if err != nil {
		return nil, fmt.Errorf("%s %w", r.Nickname, err)
	}
	return &gctrpc.UpsertDataHistoryJobResponse{
		JobId:   result.ID.String(),
		Message: "successfully upserted job: " + result.Nickname,
	}, nil
}
// GetDataHistoryJobDetails returns a data history job's details, looked up by
// either its ID or its nickname (exactly one must be supplied). Setting
// r.FullDetails on a nickname lookup additionally returns every stored job
// result.
func (s *RPCServer) GetDataHistoryJobDetails(_ context.Context, r *gctrpc.GetDataHistoryJobDetailsRequest) (*gctrpc.DataHistoryJob, error) {
	if r == nil {
		return nil, errNilRequestData
	}
	if r.Id == "" && r.Nickname == "" {
		return nil, errNicknameIDUnset
	}
	if r.Nickname != "" && r.Id != "" {
		return nil, errOnlyNicknameOrID
	}
	var (
		job        *DataHistoryJob
		err        error
		rpcResults []*gctrpc.DataHistoryJobResult
	)
	switch {
	case r.Id != "":
		var jobID uuid.UUID
		jobID, err = uuid.FromString(r.Id)
		if err != nil {
			return nil, fmt.Errorf("%s %w", r.Id, err)
		}
		job, err = s.dataHistoryManager.GetByID(jobID)
		if err != nil {
			return nil, fmt.Errorf("%s %w", r.Id, err)
		}
	default:
		job, err = s.dataHistoryManager.GetByNickname(r.Nickname, r.FullDetails)
		if err != nil {
			return nil, fmt.Errorf("%s %w", r.Nickname, err)
		}
		if r.FullDetails {
			// Flatten the grouped interval results into the RPC response shape.
			for _, batch := range job.Results {
				for x := range batch {
					rpcResults = append(rpcResults, &gctrpc.DataHistoryJobResult{
						StartDate: batch[x].IntervalStartDate.Format(common.SimpleTimeFormat),
						EndDate:   batch[x].IntervalEndDate.Format(common.SimpleTimeFormat),
						HasData:   batch[x].Status == dataHistoryStatusComplete,
						Message:   batch[x].Result,
						RunDate:   batch[x].Date.Format(common.SimpleTimeFormat),
					})
				}
			}
		}
	}
	return &gctrpc.DataHistoryJob{
		Id:       job.ID.String(),
		Nickname: job.Nickname,
		Exchange: job.Exchange,
		Asset:    job.Asset.String(),
		Pair: &gctrpc.CurrencyPair{
			Delimiter: job.Pair.Delimiter,
			Base:      job.Pair.Base.String(),
			Quote:     job.Pair.Quote.String(),
		},
		StartDate:        job.StartDate.Format(common.SimpleTimeFormat),
		EndDate:          job.EndDate.Format(common.SimpleTimeFormat),
		Interval:         int64(job.Interval.Duration()),
		RequestSizeLimit: job.RequestSizeLimit,
		DataType:         job.DataType.String(),
		MaxRetryAttempts: job.MaxRetryAttempts,
		BatchSize:        job.RunBatchLimit,
		JobResults:       rpcResults,
		Status:           job.Status.String(),
	}, nil
}
// DeleteDataHistoryJob deletes a data history job from the database. Exactly
// one of the nickname or ID must be supplied to identify the job.
func (s *RPCServer) DeleteDataHistoryJob(_ context.Context, r *gctrpc.GetDataHistoryJobDetailsRequest) (*gctrpc.GenericResponse, error) {
	if r == nil {
		return nil, errNilRequestData
	}
	if r.Nickname == "" && r.Id == "" {
		return nil, errNicknameIDUnset
	}
	if r.Nickname != "" && r.Id != "" {
		return nil, errOnlyNicknameOrID
	}
	if err := s.dataHistoryManager.DeleteJob(r.Nickname, r.Id); err != nil {
		// Surface the failure in the response status as well as the error.
		log.Error(log.GRPCSys, err)
		return &gctrpc.GenericResponse{Status: "failed"}, err
	}
	return &gctrpc.GenericResponse{Status: "success"}, nil
}
// GetActiveDataHistoryJobs returns the details of every currently active data
// history job.
func (s *RPCServer) GetActiveDataHistoryJobs(_ context.Context, _ *gctrpc.GetInfoRequest) (*gctrpc.DataHistoryJobs, error) {
	activeJobs, err := s.dataHistoryManager.GetActiveJobs()
	if err != nil {
		return nil, err
	}
	var details []*gctrpc.DataHistoryJob
	for x := range activeJobs {
		details = append(details, &gctrpc.DataHistoryJob{
			Id:       activeJobs[x].ID.String(),
			Nickname: activeJobs[x].Nickname,
			Exchange: activeJobs[x].Exchange,
			Asset:    activeJobs[x].Asset.String(),
			Pair: &gctrpc.CurrencyPair{
				Delimiter: activeJobs[x].Pair.Delimiter,
				Base:      activeJobs[x].Pair.Base.String(),
				Quote:     activeJobs[x].Pair.Quote.String(),
			},
			StartDate:        activeJobs[x].StartDate.Format(common.SimpleTimeFormat),
			EndDate:          activeJobs[x].EndDate.Format(common.SimpleTimeFormat),
			Interval:         int64(activeJobs[x].Interval.Duration()),
			RequestSizeLimit: activeJobs[x].RequestSizeLimit,
			DataType:         activeJobs[x].DataType.String(),
			MaxRetryAttempts: activeJobs[x].MaxRetryAttempts,
			BatchSize:        activeJobs[x].RunBatchLimit,
			Status:           activeJobs[x].Status.String(),
		})
	}
	return &gctrpc.DataHistoryJobs{Results: details}, nil
}
// GetDataHistoryJobsBetween returns all jobs created between the supplied
// start and end dates.
func (s *RPCServer) GetDataHistoryJobsBetween(_ context.Context, r *gctrpc.GetDataHistoryJobsBetweenRequest) (*gctrpc.DataHistoryJobs, error) {
	if r == nil {
		return nil, errNilRequestData
	}
	start, err := time.Parse(common.SimpleTimeFormat, r.StartDate)
	if err != nil {
		return nil, fmt.Errorf("%w cannot parse start time %v", errInvalidTimes, err)
	}
	end, err := time.Parse(common.SimpleTimeFormat, r.EndDate)
	if err != nil {
		return nil, fmt.Errorf("%w cannot parse end time %v", errInvalidTimes, err)
	}
	// Validate the parsed times as-is; previously start was converted with
	// .Local() which made validation dependent on the server's timezone and
	// was inconsistent with every other handler in this file.
	err = common.StartEndTimeCheck(start, end)
	if err != nil {
		return nil, err
	}
	jobs, err := s.dataHistoryManager.GetAllJobStatusBetween(start, end)
	if err != nil {
		return nil, err
	}
	var respJobs []*gctrpc.DataHistoryJob
	for i := range jobs {
		respJobs = append(respJobs, &gctrpc.DataHistoryJob{
			Id:       jobs[i].ID.String(),
			Nickname: jobs[i].Nickname,
			Exchange: jobs[i].Exchange,
			Asset:    jobs[i].Asset.String(),
			Pair: &gctrpc.CurrencyPair{
				Delimiter: jobs[i].Pair.Delimiter,
				Base:      jobs[i].Pair.Base.String(),
				Quote:     jobs[i].Pair.Quote.String(),
			},
			StartDate:        jobs[i].StartDate.Format(common.SimpleTimeFormat),
			EndDate:          jobs[i].EndDate.Format(common.SimpleTimeFormat),
			Interval:         int64(jobs[i].Interval.Duration()),
			RequestSizeLimit: jobs[i].RequestSizeLimit,
			DataType:         jobs[i].DataType.String(),
			MaxRetryAttempts: jobs[i].MaxRetryAttempts,
			BatchSize:        jobs[i].RunBatchLimit,
			Status:           jobs[i].Status.String(),
		})
	}
	return &gctrpc.DataHistoryJobs{
		Results: respJobs,
	}, nil
}
// GetDataHistoryJobSummary provides a general look at how a data history job
// is going via the "resultSummaries" property. The job is identified by
// nickname only.
func (s *RPCServer) GetDataHistoryJobSummary(_ context.Context, r *gctrpc.GetDataHistoryJobDetailsRequest) (*gctrpc.DataHistoryJob, error) {
	if r == nil {
		return nil, errNilRequestData
	}
	if r.Nickname == "" {
		return nil, fmt.Errorf("get job summary %w", errNicknameUnset)
	}
	summary, err := s.dataHistoryManager.GenerateJobSummary(r.Nickname)
	if err != nil {
		return nil, err
	}
	return &gctrpc.DataHistoryJob{
		Nickname: summary.Nickname,
		Exchange: summary.Exchange,
		Asset:    summary.Asset.String(),
		Pair: &gctrpc.CurrencyPair{
			Delimiter: summary.Pair.Delimiter,
			Base:      summary.Pair.Base.String(),
			Quote:     summary.Pair.Quote.String(),
		},
		StartDate:       summary.StartDate.Format(common.SimpleTimeFormat),
		EndDate:         summary.EndDate.Format(common.SimpleTimeFormat),
		Interval:        int64(summary.Interval.Duration()),
		DataType:        summary.DataType.String(),
		Status:          summary.Status.String(),
		ResultSummaries: summary.ResultRanges,
	}, nil
}

View File

@@ -9,11 +9,14 @@ import (
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"testing"
"time"
"github.com/gofrs/uuid"
"github.com/thrasher-corp/gocryptotrader/common"
"github.com/thrasher-corp/gocryptotrader/common/convert"
"github.com/thrasher-corp/gocryptotrader/config"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database"
@@ -38,7 +41,7 @@ const (
unexpectedLackOfError = "unexpected lack of error"
migrationsFolder = "migrations"
databaseFolder = "database"
databaseName = "rpctestdb"
databaseName = "rpctestdb.db"
)
// fExchange is a fake exchange with function overrides
@@ -82,6 +85,7 @@ func (f fExchange) UpdateAccountInfo(a asset.Item) (account.Holdings, error) {
// Sets up everything required to run any function inside rpcserver
func RPCTestSetup(t *testing.T) *Engine {
t.Helper()
var err error
dbConf := database.Config{
Enabled: true,
@@ -102,6 +106,10 @@ func RPCTestSetup(t *testing.T) *Engine {
if err != nil {
log.Fatal(err)
}
err = engerino.LoadExchange("Binance", false, nil)
if err != nil {
log.Fatal(err)
}
engerino.Config.Database = dbConf
engerino.DatabaseManager, err = SetupDatabaseConnectionManager(&engerino.Config.Database)
if err != nil {
@@ -116,8 +124,15 @@ func RPCTestSetup(t *testing.T) *Engine {
if err != nil {
t.Fatalf("failed to run migrations %v", err)
}
uuider, _ := uuid.NewV4()
err = dbexchange.Insert(dbexchange.Details{Name: testExchange, UUID: uuider})
uuider, err := uuid.NewV4()
if err != nil {
t.Fatal(err)
}
uuider2, err := uuid.NewV4()
if err != nil {
t.Fatal(err)
}
err = dbexchange.InsertMany([]dbexchange.Details{{Name: testExchange, UUID: uuider}, {Name: "Binance", UUID: uuider2}})
if err != nil {
t.Fatalf("failed to insert exchange %v", err)
}
@@ -126,6 +141,7 @@ func RPCTestSetup(t *testing.T) *Engine {
}
func CleanRPCTest(t *testing.T, engerino *Engine) {
t.Helper()
err := engerino.DatabaseManager.Stop()
if err != nil {
t.Error(err)
@@ -388,8 +404,8 @@ func TestGetHistoricCandles(t *testing.T) {
Start: "2020-01-02 15:04:05",
End: "2020-01-02 15:04:05",
})
if !errors.Is(err, errInvalidTimes) {
t.Errorf("expected %v, received %v", errInvalidTimes, err)
if !errors.Is(err, common.ErrStartEqualsEnd) {
t.Errorf("received %v, expected %v", err, common.ErrStartEqualsEnd)
}
var results *gctrpc.GetHistoricCandlesResponse
// default run
@@ -875,11 +891,6 @@ func TestGetOrders(t *testing.T) {
t.Errorf("expected %v, received %v", errExchangeNotLoaded, err)
}
err = engerino.LoadExchange(exchName, false, nil)
if err != nil {
t.Error(err)
}
_, err = s.GetOrders(context.Background(), &gctrpc.GetOrdersRequest{
Exchange: exchName,
AssetType: asset.Spot.String(),
@@ -900,19 +911,19 @@ func TestGetOrders(t *testing.T) {
Exchange: exchName,
AssetType: asset.Spot.String(),
Pair: p,
StartDate: time.Now().Format(common.SimpleTimeFormat),
EndDate: time.Now().Add(-time.Hour).Format(common.SimpleTimeFormat),
StartDate: time.Now().UTC().Add(time.Second).Format(common.SimpleTimeFormat),
EndDate: time.Now().UTC().Add(-time.Hour).Format(common.SimpleTimeFormat),
})
if !errors.Is(err, errInvalidTimes) {
t.Errorf("expected %v, received %v", errInvalidTimes, err)
if !errors.Is(err, common.ErrStartAfterTimeNow) {
t.Errorf("received %v, expected %v", err, common.ErrStartAfterTimeNow)
}
_, err = s.GetOrders(context.Background(), &gctrpc.GetOrdersRequest{
Exchange: exchName,
AssetType: asset.Spot.String(),
Pair: p,
StartDate: time.Now().Format(common.SimpleTimeFormat),
EndDate: time.Now().Add(time.Hour).Format(common.SimpleTimeFormat),
StartDate: time.Now().UTC().Add(-time.Hour).Format(common.SimpleTimeFormat),
EndDate: time.Now().UTC().Add(time.Hour).Format(common.SimpleTimeFormat),
})
if !errors.Is(err, exchange.ErrAuthenticatedRequestWithoutCredentialsSet) {
t.Errorf("received '%v', expected '%v'", err, exchange.ErrAuthenticatedRequestWithoutCredentialsSet)
@@ -938,10 +949,16 @@ func TestGetOrders(t *testing.T) {
}
func TestGetOrder(t *testing.T) {
exchName := "binance"
exchName := "Binance"
engerino := RPCTestSetup(t)
defer CleanRPCTest(t, engerino)
s := RPCServer{Engine: engerino}
var wg sync.WaitGroup
var err error
engerino.OrderManager, err = SetupOrderManager(engerino.ExchangeManager, engerino.CommunicationsManager, &wg, false)
if !errors.Is(err, nil) {
t.Errorf("expected %v, received %v", errInvalidArguments, nil)
}
p := &gctrpc.CurrencyPair{
Delimiter: "-",
@@ -949,27 +966,21 @@ func TestGetOrder(t *testing.T) {
Quote: "USDT",
}
_, err := s.GetOrder(context.Background(), nil)
_, err = s.GetOrder(context.Background(), nil)
if !errors.Is(err, errInvalidArguments) {
t.Errorf("expected %v, received %v", errInvalidArguments, err)
}
_, err = s.GetOrder(context.Background(), &gctrpc.GetOrderRequest{
Exchange: exchName,
Exchange: "test123",
OrderId: "",
Pair: p,
Asset: "spot",
})
if !errors.Is(err, errExchangeNotLoaded) {
t.Errorf("expected %v, received %v", errExchangeNotLoaded, err)
}
err = engerino.LoadExchange(exchName, false, nil)
if err != nil {
t.Error(err)
}
_, err = s.GetOrder(context.Background(), &gctrpc.GetOrderRequest{
Exchange: exchName,
OrderId: "",
@@ -1178,3 +1189,279 @@ func TestParseEvents(t *testing.T) {
t.Fatal("Expected second entry in slice to return a Request.Type of Crypto")
}
}
// TestRPCServerUpsertDataHistoryJob exercises UpsertDataHistoryJob: nil
// request, empty request (unsupported asset), and a valid upsert.
func TestRPCServerUpsertDataHistoryJob(t *testing.T) {
	t.Parallel()
	manager := createDHM(t)
	exchManager := SetupExchangeManager()
	e, err := exchManager.NewExchangeByName(testExchange)
	if err != nil {
		t.Fatal(err)
	}
	e.SetDefaults()
	// Wire up a single enabled BTC-USD spot pair so the upsert can validate
	// the requested exchange/asset/pair combination.
	base := e.GetBase()
	pair := currency.NewPair(currency.BTC, currency.USD)
	store := &currency.PairStore{
		Available:    currency.Pairs{pair},
		Enabled:      currency.Pairs{pair},
		AssetEnabled: convert.BoolPtr(true),
	}
	base.CurrencyPairs.Pairs = map[asset.Item]*currency.PairStore{
		asset.Spot: store,
	}
	exchManager.Add(e)
	server := RPCServer{Engine: &Engine{dataHistoryManager: manager, ExchangeManager: exchManager}}

	// Nil request is rejected outright.
	_, err = server.UpsertDataHistoryJob(context.Background(), nil)
	if !errors.Is(err, errNilRequestData) {
		t.Errorf("received %v, expected %v", err, errNilRequestData)
	}
	// Empty request fails asset validation.
	_, err = server.UpsertDataHistoryJob(context.Background(), &gctrpc.UpsertDataHistoryJobRequest{})
	if !errors.Is(err, asset.ErrNotSupported) {
		t.Errorf("received %v, expected %v", err, asset.ErrNotSupported)
	}
	// A fully-populated request succeeds.
	request := &gctrpc.UpsertDataHistoryJobRequest{
		Nickname: "hellomoto",
		Exchange: testExchange,
		Asset:    asset.Spot.String(),
		Pair: &gctrpc.CurrencyPair{
			Delimiter: "-",
			Base:      "BTC",
			Quote:     "USD",
		},
		StartDate:        time.Now().Add(-time.Hour * 24).Format(common.SimpleTimeFormat),
		EndDate:          time.Now().Format(common.SimpleTimeFormat),
		Interval:         int64(kline.OneHour.Duration()),
		RequestSizeLimit: 10,
		DataType:         int64(dataHistoryCandleDataType),
		MaxRetryAttempts: 3,
		BatchSize:        500,
	}
	_, err = server.UpsertDataHistoryJob(context.Background(), request)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
}
// TestGetDataHistoryJobDetails exercises GetDataHistoryJobDetails: nil
// request, missing identifiers, mutually-exclusive identifiers, lookup by
// nickname, lookup by ID, and a full-details lookup whose nickname is
// verified in the response.
func TestGetDataHistoryJobDetails(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	s := RPCServer{Engine: &Engine{dataHistoryManager: m}}
	// Seed one job so the lookups below have something to find.
	dhj := &DataHistoryJob{
		Nickname:  "TestGetDataHistoryJobDetails",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().UTC().Add(-time.Minute * 2),
		EndDate:   time.Now().UTC(),
		Interval:  kline.OneMin,
	}
	err := m.UpsertJob(dhj, false)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	_, err = s.GetDataHistoryJobDetails(context.Background(), nil)
	if !errors.Is(err, errNilRequestData) {
		t.Errorf("received %v, expected %v", err, errNilRequestData)
	}
	// Either a nickname or an ID must be supplied.
	_, err = s.GetDataHistoryJobDetails(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{})
	if !errors.Is(err, errNicknameIDUnset) {
		t.Errorf("received %v, expected %v", err, errNicknameIDUnset)
	}
	// ...but not both at once.
	_, err = s.GetDataHistoryJobDetails(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Id: "123", Nickname: "123"})
	if !errors.Is(err, errOnlyNicknameOrID) {
		t.Errorf("received %v, expected %v", err, errOnlyNicknameOrID)
	}
	_, err = s.GetDataHistoryJobDetails(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Nickname: "TestGetDataHistoryJobDetails"})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	_, err = s.GetDataHistoryJobDetails(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Id: m.jobs[0].ID.String()})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	resp, err := s.GetDataHistoryJobDetails(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Nickname: "TestGetDataHistoryJobDetails", FullDetails: true})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	if resp == nil {
		t.Fatal("expected job")
	}
	if !strings.EqualFold(resp.Nickname, "TestGetDataHistoryJobDetails") {
		// Fixed: received/expected arguments were previously swapped,
		// which made a failure report the expected value as received.
		t.Errorf("received %v, expected %v", resp.Nickname, "TestGetDataHistoryJobDetails")
	}
}
// TestDeleteDataHistoryJob exercises DeleteDataHistoryJob: nil request,
// missing identifiers, mutually-exclusive identifiers, delete by nickname,
// then re-seed and delete by ID, verifying the job store empties out.
func TestDeleteDataHistoryJob(t *testing.T) {
	t.Parallel()
	manager := createDHM(t)
	server := RPCServer{Engine: &Engine{dataHistoryManager: manager}}
	job := &DataHistoryJob{
		Nickname:  "TestDeleteDataHistoryJob",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().UTC().Add(-time.Minute * 2),
		EndDate:   time.Now().UTC(),
		Interval:  kline.OneMin,
	}
	if err := manager.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Fatalf("received %v, expected %v", err, nil)
	}
	// Invalid request shapes are rejected before any deletion happens.
	_, err := server.DeleteDataHistoryJob(context.Background(), nil)
	if !errors.Is(err, errNilRequestData) {
		t.Errorf("received %v, expected %v", err, errNilRequestData)
	}
	_, err = server.DeleteDataHistoryJob(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{})
	if !errors.Is(err, errNicknameIDUnset) {
		t.Errorf("received %v, expected %v", err, errNicknameIDUnset)
	}
	_, err = server.DeleteDataHistoryJob(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Id: "123", Nickname: "123"})
	if !errors.Is(err, errOnlyNicknameOrID) {
		t.Errorf("received %v, expected %v", err, errOnlyNicknameOrID)
	}
	// Remember the ID before deleting by nickname so we can delete by ID next.
	jobID := manager.jobs[0].ID
	_, err = server.DeleteDataHistoryJob(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Nickname: "TestDeleteDataHistoryJob"})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	// Re-seed the same job and delete it again, this time by ID.
	job.ID = jobID
	manager.jobs = append(manager.jobs, job)
	_, err = server.DeleteDataHistoryJob(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Id: jobID.String()})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	if len(manager.jobs) != 0 {
		t.Errorf("received %v, expected %v", len(manager.jobs), 0)
	}
}
// TestGetActiveDataHistoryJobs verifies that a freshly upserted job is
// reported by GetActiveDataHistoryJobs.
func TestGetActiveDataHistoryJobs(t *testing.T) {
	t.Parallel()
	manager := createDHM(t)
	server := RPCServer{Engine: &Engine{dataHistoryManager: manager}}
	job := &DataHistoryJob{
		Nickname:  "TestGetActiveDataHistoryJobs",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().UTC().Add(-time.Minute * 2),
		EndDate:   time.Now().UTC(),
		Interval:  kline.OneMin,
	}
	if err := manager.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Fatalf("received %v, expected %v", err, nil)
	}
	resp, err := server.GetActiveDataHistoryJobs(context.Background(), nil)
	if !errors.Is(err, nil) {
		t.Fatalf("received %v, expected %v", err, nil)
	}
	// Exactly the one seeded job should be active.
	if len(resp.Results) != 1 {
		t.Fatalf("received %v, expected %v", len(resp.Results), 1)
	}
}
// TestGetDataHistoryJobsBetween exercises GetDataHistoryJobsBetween: nil
// request, a start date in the future, then a valid window that should
// contain the one seeded job.
func TestGetDataHistoryJobsBetween(t *testing.T) {
	t.Parallel()
	manager := createDHM(t)
	server := RPCServer{Engine: &Engine{dataHistoryManager: manager}}
	job := &DataHistoryJob{
		Nickname:  "GetDataHistoryJobsBetween",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().UTC().Add(-time.Minute * 2),
		EndDate:   time.Now().UTC(),
		Interval:  kline.OneMin,
	}
	if _, err := server.GetDataHistoryJobsBetween(context.Background(), nil); !errors.Is(err, errNilRequestData) {
		t.Fatalf("received %v, expected %v", err, errNilRequestData)
	}
	// A start date after now is rejected before any lookup occurs.
	badWindow := &gctrpc.GetDataHistoryJobsBetweenRequest{
		StartDate: time.Now().UTC().Add(time.Minute).Format(common.SimpleTimeFormat),
		EndDate:   time.Now().UTC().Format(common.SimpleTimeFormat),
	}
	if _, err := server.GetDataHistoryJobsBetween(context.Background(), badWindow); !errors.Is(err, common.ErrStartAfterTimeNow) {
		t.Fatalf("received %v, expected %v", err, common.ErrStartAfterTimeNow)
	}
	if err := manager.UpsertJob(job, false); !errors.Is(err, nil) {
		t.Fatalf("received %v, expected %v", err, nil)
	}
	// A window straddling "now" should return the seeded job.
	window := &gctrpc.GetDataHistoryJobsBetweenRequest{
		StartDate: time.Now().Add(-time.Minute).UTC().Format(common.SimpleTimeFormat),
		EndDate:   time.Now().Add(time.Minute).UTC().Format(common.SimpleTimeFormat),
	}
	resp, err := server.GetDataHistoryJobsBetween(context.Background(), window)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	if len(resp.Results) != 1 {
		t.Errorf("received %v, expected %v", len(resp.Results), 1)
	}
}
// TestGetDataHistoryJobSummary exercises GetDataHistoryJobSummary: nil
// request, missing nickname, then a successful summary whose nickname and
// result-summary slice are verified.
func TestGetDataHistoryJobSummary(t *testing.T) {
	t.Parallel()
	m := createDHM(t)
	s := RPCServer{Engine: &Engine{dataHistoryManager: m}}
	// Seed one job so a summary can be generated.
	dhj := &DataHistoryJob{
		Nickname:  "TestGetDataHistoryJobSummary",
		Exchange:  testExchange,
		Asset:     asset.Spot,
		Pair:      currency.NewPair(currency.BTC, currency.USD),
		StartDate: time.Now().UTC().Add(-time.Minute * 2),
		EndDate:   time.Now().UTC(),
		Interval:  kline.OneMin,
	}
	err := m.UpsertJob(dhj, false)
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	_, err = s.GetDataHistoryJobSummary(context.Background(), nil)
	if !errors.Is(err, errNilRequestData) {
		t.Errorf("received %v, expected %v", err, errNilRequestData)
	}
	// A nickname is mandatory for summaries.
	_, err = s.GetDataHistoryJobSummary(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{})
	if !errors.Is(err, errNicknameUnset) {
		t.Errorf("received %v, expected %v", err, errNicknameUnset)
	}
	resp, err := s.GetDataHistoryJobSummary(context.Background(), &gctrpc.GetDataHistoryJobDetailsRequest{Nickname: "TestGetDataHistoryJobSummary"})
	if !errors.Is(err, nil) {
		t.Errorf("received %v, expected %v", err, nil)
	}
	if resp == nil {
		t.Fatal("expected job")
	}
	if !strings.EqualFold(resp.Nickname, "TestGetDataHistoryJobSummary") {
		// Fixed: received/expected arguments were previously swapped,
		// which made a failure report the expected value as received.
		t.Errorf("received %v, expected %v", resp.Nickname, "TestGetDataHistoryJobSummary")
	}
	if resp.ResultSummaries == nil {
		t.Errorf("received %v, expected %v", nil, "result summaries slice")
	}
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/thrasher-corp/gocryptotrader/communications/base"
"github.com/thrasher-corp/gocryptotrader/currency"
"github.com/thrasher-corp/gocryptotrader/database"
exchange "github.com/thrasher-corp/gocryptotrader/exchanges"
"github.com/thrasher-corp/gocryptotrader/exchanges/asset"
"github.com/thrasher-corp/gocryptotrader/exchanges/order"
@@ -31,9 +32,11 @@ var (
// ErrSubSystemNotStarted message to return when subsystem not started
ErrSubSystemNotStarted = errors.New("subsystem not started")
// ErrNilSubsystem is returned when a subsystem hasn't had its Setup() func run
ErrNilSubsystem = errors.New("subsystem not setup")
errNilWaitGroup = errors.New("nil wait group received")
errNilExchangeManager = errors.New("cannot start with nil exchange manager")
ErrNilSubsystem = errors.New("subsystem not setup")
errNilWaitGroup = errors.New("nil wait group received")
errNilExchangeManager = errors.New("cannot start with nil exchange manager")
errNilDatabaseConnectionManager = errors.New("cannot start with nil database connection manager")
errNilConfig = errors.New("received nil config")
)
// iExchangeManager limits exposure of accessible functions to exchange manager
@@ -83,3 +86,8 @@ type iCurrencyPairSyncer interface {
PrintOrderbookSummary(*orderbook.Base, string, error)
Update(string, currency.Pair, asset.Item, int, error) error
}
// iDatabaseConnectionManager defines a limited scoped databaseConnectionManager
type iDatabaseConnectionManager interface {
GetInstance() database.IDatabase
}

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Subsystem_types
# GoCryptoTrader package Subsystem types
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Subsystem_types
## Current Features for Subsystem types
+ Subsystem contains subsystems that are used at run time by an `engine.Engine`, however they can be setup and run individually.
+ Subsystems are designed to be self contained
+ All subsystems have a public `Setup(...) (..., error)` function to return a valid subsystem ready for use

View File

@@ -32,7 +32,8 @@ var (
// DefaultSyncerWorkers limits the number of sync workers
DefaultSyncerWorkers = 15
// DefaultSyncerTimeoutREST the default time to switch from REST to websocket protocols without a response
DefaultSyncerTimeoutREST = time.Second * 15
DefaultSyncerTimeoutREST = time.Second * 15
// DefaultSyncerTimeoutWebsocket the default time to switch from websocket to REST protocols without a response
DefaultSyncerTimeoutWebsocket = time.Minute
errNoSyncItemsEnabled = errors.New("no sync items enabled")
errUnknownSyncItem = errors.New("unknown sync item")

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Sync_manager
# GoCryptoTrader package Sync manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Sync_manager
## Current Features for Sync manager
+ The currency pair syncer subsystem is used to keep all trades, tickers and orderbooks up to date for all enabled exchange asset currency pairs
+ It can sync data via a websocket connection or REST and will switch between them if there has been no updates
+ In order to modify the behaviour of the currency pair syncer subsystem, you can change runtime parameters as detailed below:

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Websocketroutine_manager
# GoCryptoTrader package Websocketroutine manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Websocketroutine_manager
## Current Features for Websocketroutine manager
+ The websocket routine manager subsystem is used process websocket data in a unified manner across enabled exchanges with websocket support
+ It can help process orders to the order manager subsystem when it receives new data
+ Logs output of ticker and orderbook updates

View File

@@ -1,4 +1,4 @@
# GoCryptoTrader package Withdraw_manager
# GoCryptoTrader package Withdraw manager
<img src="/common/gctlogo.png?raw=true" width="350px" height="350px" hspace="70">
@@ -18,7 +18,7 @@ You can track ideas, planned features and what's in progress on this Trello boar
Join our slack to discuss all things related to GoCryptoTrader! [GoCryptoTrader Slack](https://join.slack.com/t/gocryptotrader/shared_invite/enQtNTQ5NDAxMjA2Mjc5LTc5ZDE1ZTNiOGM3ZGMyMmY1NTAxYWZhODE0MWM5N2JlZDk1NDU0YTViYzk4NTk3OTRiMDQzNGQ1YTc4YmRlMTk)
## Current Features for Withdraw_manager
## Current Features for Withdraw manager
+ The withdraw manager subsystem is responsible for the processing of withdrawal requests and submitting them to exchanges
+ The withdraw manager can be interacted with via GRPC commands such as `WithdrawFiatRequest` and `WithdrawCryptoRequest`
+ Supports caching of responses to allow for quick viewing of withdrawal events via GRPC

View File

@@ -661,6 +661,7 @@ func (b *Binance) GetAccount() (*Account, error) {
return &resp.Account, nil
}
// GetMarginAccount returns account information for margin accounts
func (b *Binance) GetMarginAccount() (*MarginAccount, error) {
var resp MarginAccount
params := url.Values{}
@@ -688,6 +689,8 @@ func (b *Binance) SendHTTPRequest(ePath exchange.URL, path string, f request.End
Endpoint: f})
}
// SendAPIKeyHTTPRequest is a special API request where the api key is
// appended to the headers without a secret
func (b *Binance) SendAPIKeyHTTPRequest(ePath exchange.URL, path string, f request.EndpointLimit, result interface{}) error {
endpointPath, err := b.API.Endpoints.GetURL(ePath)
if err != nil {

View File

@@ -1522,7 +1522,11 @@ func (b *Binance) GetHistoricCandlesExtended(pair currency.Pair, a asset.Item, s
Asset: a,
Interval: interval,
}
dates := kline.CalculateCandleDateRanges(start, end, interval, b.Features.Enabled.Kline.ResultLimit)
dates, err := kline.CalculateCandleDateRanges(start, end, interval, b.Features.Enabled.Kline.ResultLimit)
if err != nil {
return kline.Item{}, err
}
var candles []CandleStick
for x := range dates.Ranges {
req := KlinesRequestParams{
Interval: b.FormatExchangeKlineInterval(interval),
@@ -1532,7 +1536,7 @@ func (b *Binance) GetHistoricCandlesExtended(pair currency.Pair, a asset.Item, s
Limit: int(b.Features.Enabled.Kline.ResultLimit),
}
candles, err := b.GetSpotKline(&req)
candles, err = b.GetSpotKline(&req)
if err != nil {
return kline.Item{}, err
}
@@ -1554,11 +1558,11 @@ func (b *Binance) GetHistoricCandlesExtended(pair currency.Pair, a asset.Item, s
}
}
err := dates.VerifyResultsHaveData(ret.Candles)
if err != nil {
log.Warnf(log.ExchangeSys, "%s - %s", b.Name, err)
dates.SetHasDataFromCandles(ret.Candles)
summary := dates.DataSummary(false)
if len(summary) > 0 {
log.Warnf(log.ExchangeSys, "%v - %v", b.Name, summary)
}
ret.RemoveDuplicates()
ret.RemoveOutsideRange(start, end)
ret.SortCandlesByTimestamp(false)

View File

@@ -532,8 +532,8 @@ func (b *Bitfinex) GetHistoricTrades(p currency.Pair, assetType asset.Item, time
if assetType == asset.MarginFunding {
return nil, fmt.Errorf("asset type '%v' not supported", assetType)
}
if timestampStart.Equal(timestampEnd) || timestampEnd.After(time.Now()) || timestampEnd.Before(timestampStart) {
return nil, fmt.Errorf("invalid time range supplied. Start: %v End %v", timestampStart, timestampEnd)
if err := common.StartEndTimeCheck(timestampStart, timestampEnd); err != nil {
return nil, fmt.Errorf("invalid time range supplied. Start: %v End %v %w", timestampStart, timestampEnd, err)
}
var err error
p, err = b.FormatExchangeCurrency(p, assetType)
@@ -1025,7 +1025,10 @@ func (b *Bitfinex) GetHistoricCandlesExtended(pair currency.Pair, a asset.Item,
Interval: interval,
}
dates := kline.CalculateCandleDateRanges(start, end, interval, b.Features.Enabled.Kline.ResultLimit)
dates, err := kline.CalculateCandleDateRanges(start, end, interval, b.Features.Enabled.Kline.ResultLimit)
if err != nil {
return kline.Item{}, err
}
cf, err := b.fixCasing(pair, a)
if err != nil {
return kline.Item{}, err
@@ -1051,9 +1054,10 @@ func (b *Bitfinex) GetHistoricCandlesExtended(pair currency.Pair, a asset.Item,
})
}
}
err = dates.VerifyResultsHaveData(ret.Candles)
if err != nil {
log.Warnf(log.ExchangeSys, "%s - %s", b.Name, err)
dates.SetHasDataFromCandles(ret.Candles)
summary := dates.DataSummary(false)
if len(summary) > 0 {
log.Warnf(log.ExchangeSys, "%v - %v", b.Name, summary)
}
ret.RemoveDuplicates()
ret.RemoveOutsideRange(start, end)

Some files were not shown because too many files have changed in this diff Show More