Files
gocryptotrader/database/models/postgres/candle.go
Andrew 39bbd8dfc9 (Feature) OHLCV data storage and retrieval (#522)
* WIP

* end of day WIP started migration of trade history

* added kline support to hitbtc huobi lbank

* added exchangehistory to all supported exchanges started work on coinbase 300 candles/request method

* end of day WIP

* removed unused ta and misc changes to flag ready for review

* yobit cleanup

* revert coinbase changES

* general code clean up and added zb support

* poloniex support added

* renamed method to FormatExchangeKlineInterval other misc fixes

* linter fixes

* linter fixes

* removed verbose

* fixed poloniex test coverage

* revert poloniex mock data

* regenerated poloniex mock data

* a very verbose clean up

* binance mock clean up

* removed unneeded t.Log()

* setting verbose to true to debug CI issue

* first pass changes addressed

* common.ErrNotYetImplemented implemented :D

* comments added

* WIP-addressed exchange requests and reverted previous GetExchangeHistory changes

* WIP-addressed exchange requests and reverted previous GetExchangeHistory changes

* increased test coverage added kraken support

* OKGroup support completed started work on address GetExchangeHistory feedback and migrating to own PR under https://github.com/xtda/gocryptotrader/tree/exchange_history

* convert zb ratelimits

* gofmt run on okcoin

* increased delay on rate limit

* gofmt package

* fixed panic with coinbene and bithumb if conversion fails

* very broken end of day WIP

* added support for GetHistoricCandlesEx to coinbase and binance

* gofmt package

* coinbase, btcmarkets, zb ex wrapper function added

* added all exchange support for ex regenerated mock data

* update bithumb to return wrapper method

* gofmt package

* end of day started work on changes

* models created for exchange/asset/currency/currencypair, new seed system created

* reworked test coverage added okgroup support general fixes/change requests addressed

* Added OneMonth

* limit checks on supportedexchanges

* reverted getexchangehistory

* started work on currency seeding and insertion

* reworked binance tesT

* added workaround for kraken panic

* :D extremely broken WIP

* renamed command to extended removed interval check on non-implemented commands

* added wrapperconfig back

* increased test coverage for FormatExchangeKlineInterval

* WIP

* increased test coverage for FormatExchangeKlineInterval bitfinex/gateio/huobi

* linter fixes

* zb kraken lbank coinbene btcmarkets support added

* removed verbose

* OK group support for other asset types added

* swapped margin to use spot endpoint

* index support added test coverage added for asset types

* added asset type to okcoin test

* gofmt

* add asset to extended method

* removed verbose

* Very broken WIP models need to be regenerated

* add support for coinbene swap increase test coverage

* removed verbose

* small clean up of okgroup wrapper functions

* verbose to troubleshoot CI issues

* removed verbose

* added error check reverted coinbasechanges

* attempting to fix broken model generation

* readme updated

* :D i broke so much

* model regeneration fixed & complete

* candle model filled out

* removed unused start/finish started work on decoupling api requests from kline package

* restructured coinbene, bithumb methods, added bitstamp support

* kraken time fix

* BTCMarkets restructure

* typo fix

* removed test for futures due to contact changing

* removed test for futures due to contact changing

* added start/end date to extended method over range

* WIP

* added sync option to candles

* converted to assettranslator

* removed verbose

* removed verbose

* removed invalid char

* reverted incorrectly removed return

* added import

* further template updates

* macos hates my keyboard :D

* misc canges

* started work on creating kline from databases eed

* x -> i

* removed verbose

* updated fixCasing to allocate var before checks

* sqlite3 supported work started

* removed time conversion

* further work on tets

* sort all outgoing kline candles

* fixCasing fix

* after/before checks added

* added parallel to test

* logic check on BTCmarkets

* removed unused param, used correct iterator

* converted HitBTC to use time.Time

* test update

* add iszero false check to candle times

* Seed exchange & OHCLV data for test usage

* updated resultlimit to 5000

* new line added

* added comment to exported const

* move date forward

* use configured ratelimit

* fixed pair for test

* panic fixed WIP on fixCasing

* fixCasing rework, started work on readme docs

* enable rate limiter for wrapper issues tool

* docs updated

* removed unused vars from tests

* removed err from return and formatted currency

* updated Yobit supported status

* Updated HitBTC to use onehour candles due to test execution times

* added further details to gctcli output

* added link to docs

* added link to template

* disable FTX websocket in config_example

* fix poloneix

* regenerated poloniex mock data

* removed recording flag

* format on package

* moved exchange var outside of method scope

* reworked seeding into package

* verbose output improved

* removed verbose from candles

* Added comments to exported functions

* removed verbose output

* Reworking of tests

* end of day commit

* added SQLite migration for asset, test updates for exchange, added support for withdrawal for new exchange_name_id relation

* regenerated database models

* WIP

* test rework, sqlite migration updates for withdrawal

* Reworked error returns to stop duplications, format all output to UTC, changed gctscript OHLCV output to be unix timestamp, started work on seeding tool

* gofmt

* dbseed command for seeding exchanges added, removed seed from dbmigrate, LoadCSV method added for exchange

* go mod tidy

* added import candle from csv command to dbseed

* Removed reset & duplicate migrate command from helper, renamed struct to Item/Candle over Candle/Tick, added test coverage to dbseed, improved withdraw tests

* remove broken tests due to ORM generation of Fk, removed go unneeded goroutine for inserting records

* removed t.Cleanup usage because appveyor

* added test coverage to StoreInDatabase()

* removed unneeded data from config for test

* added new line

* Added down migration support to candle/asset removal, return original error and display rollback error

* removed unneeded err assignment, break out of loop on error

* add err check to method for test

* first pass changes

* WIP

* Updated migrations for both sqlite3 & postgres to create exchanges if any withdrawal_history records are found, removed migrate command

* removed argusage as usage information is provided by flags

* added inserted records return count and test coverage

* new line added

* Database: comment config details out to disable local postgresl testing by default

* added asset support

* Database: added error return when no exchanges are found, title exchange name across queries

* Fixed test data

* Database: removed migration bool from ConnectToDatabase(), removed empty line, insert asset on test

* Database: verbose linter :D

* Database/OHLCV: removed go module from dbseed command and reverted back to main module, converted interval to int64 to match other parts of code base, provided migrations to update database, poloniex fix to skip first candle

* dbseed: add completed message to output

* Database: added migration to add asset to uniq index for candle table

* Database: database -> exchange

* Database: add asset to upsert conflict

* Poloneix: fix for invalid interval

* regenerated poloniex mock data

* Database: added down migration for candle interval update

* OHCLV/Database: WIP

* OHLCV/Datastore: added new ValidateKline() method to check that asset, pair, interval are enabled/supported by exchange and updated tests

* revert configtest changes

* OHCLV/Kline: pointer assignment to ErrorKline & format pairs on check

* goimports

* migration updates

* Database/Candle: updated tests

* revert configtest changes

* ZB: updaed defaults to use uppercase pairs

* ZB: updaed defaults to use uppercase pairs

* revert pair formatting

* Switch over to .Cotains() method from pairManagement

* Added comment & ftx back 😆

* OHLCV/Datastore: (Candles): added not null to asset, (WithdrawalHistory): added not null to exchange_name_id), reworked KlineError return

* set verbose to false

* updated btc markets test

* Updated readme

* removed ability to have blank exchange name as indexing requires it

* remove old usedb and empty exchange check, return error on no data instead

* Updated kline test to match date range

* Renamed candles.exchange_id column to candles.xchange_name_id to match withdrawal table, return err on unwrap, zb fix revert

* regenerated sqlite3 models

* force UTC for sqlite.... because sqlite

* OHLCV/Datastore: upper case pairs on insert for consistency, remove unneeded rollback call on commit failure as it has no effect, move rollback outside of insertsqlite/insertpsql methods

* add error check for no candles

* nil length/cap is 0 in golang :D

* OHLCV/Datastore: updated wording on examples, removed duplicate testfile

* OHLCV/Datastore: updated README with links to dbseed

* dbtool -> dbseed
2020-09-03 17:13:53 +10:00

1096 lines
30 KiB
Go

// Code generated by SQLBoiler 3.5.0-gct (https://github.com/thrasher-corp/sqlboiler). DO NOT EDIT.
// This file is meant to be re-generated in place and/or deleted at any time.
package postgres
import (
"context"
"database/sql"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/thrasher-corp/sqlboiler/boil"
"github.com/thrasher-corp/sqlboiler/queries"
"github.com/thrasher-corp/sqlboiler/queries/qm"
"github.com/thrasher-corp/sqlboiler/queries/qmhelper"
"github.com/thrasher-corp/sqlboiler/strmangle"
)
// Candle is an object representing the database table.
// One row is a single OHLCV candle for an exchange/base/quote/asset/interval
// combination at a given timestamp.
type Candle struct {
	// ID is the primary key; it carries a database-side default (see candleColumnsWithDefault).
	ID string `boil:"id" json:"id" toml:"id" yaml:"id"`
	// ExchangeNameID is the foreign key into the exchange table (see ExchangeName relationship).
	ExchangeNameID string `boil:"exchange_name_id" json:"exchange_name_id" toml:"exchange_name_id" yaml:"exchange_name_id"`
	// Base and Quote identify the currency pair.
	Base  string `boil:"base" json:"base" toml:"base" yaml:"base"`
	Quote string `boil:"quote" json:"quote" toml:"quote" yaml:"quote"`
	// Interval is the candle period, stored as an int64.
	Interval int64 `boil:"interval" json:"interval" toml:"interval" yaml:"interval"`
	// Timestamp is the candle's time bucket.
	Timestamp time.Time `boil:"timestamp" json:"timestamp" toml:"timestamp" yaml:"timestamp"`
	// OHLCV values.
	Open   float64 `boil:"open" json:"open" toml:"open" yaml:"open"`
	High   float64 `boil:"high" json:"high" toml:"high" yaml:"high"`
	Low    float64 `boil:"low" json:"low" toml:"low" yaml:"low"`
	Close  float64 `boil:"close" json:"close" toml:"close" yaml:"close"`
	Volume float64 `boil:"volume" json:"volume" toml:"volume" yaml:"volume"`
	// Asset is the asset type (e.g. spot) — part of the table's identity per the migration history.
	Asset string `boil:"asset" json:"asset" toml:"asset" yaml:"asset"`

	// R caches eagerly-loaded relationships (not persisted).
	R *candleR `boil:"-" json:"-" toml:"-" yaml:"-"`
	// L exposes the relationship Load methods (not persisted).
	L candleL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
// CandleColumns maps each Candle struct field name to its database column
// name, for use when building raw queries or column lists.
var CandleColumns = struct {
	ID             string
	ExchangeNameID string
	Base           string
	Quote          string
	Interval       string
	Timestamp      string
	Open           string
	High           string
	Low            string
	Close          string
	Volume         string
	Asset          string
}{
	ID:             "id",
	ExchangeNameID: "exchange_name_id",
	Base:           "base",
	Quote:          "quote",
	Interval:       "interval",
	Timestamp:      "timestamp",
	Open:           "open",
	High:           "high",
	Low:            "low",
	Close:          "close",
	Volume:         "volume",
	Asset:          "asset",
}
// Generated where

// whereHelperfloat64 builds type-safe where-clause query mods for a single
// float64 column identified by field.
type whereHelperfloat64 struct{ field string }

// EQ filters rows where the column equals x.
func (w whereHelperfloat64) EQ(x float64) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.EQ, x)
}

// NEQ filters rows where the column does not equal x.
func (w whereHelperfloat64) NEQ(x float64) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.NEQ, x)
}

// LT filters rows where the column is less than x.
func (w whereHelperfloat64) LT(x float64) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LT, x)
}

// LTE filters rows where the column is less than or equal to x.
func (w whereHelperfloat64) LTE(x float64) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.LTE, x)
}

// GT filters rows where the column is greater than x.
func (w whereHelperfloat64) GT(x float64) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GT, x)
}

// GTE filters rows where the column is greater than or equal to x.
func (w whereHelperfloat64) GTE(x float64) qm.QueryMod {
	return qmhelper.Where(w.field, qmhelper.GTE, x)
}
// CandleWhere provides ready-made where-clause helpers for every candle
// column, with the fully-qualified, quoted column name baked in.
var CandleWhere = struct {
	ID             whereHelperstring
	ExchangeNameID whereHelperstring
	Base           whereHelperstring
	Quote          whereHelperstring
	Interval       whereHelperint64
	Timestamp      whereHelpertime_Time
	Open           whereHelperfloat64
	High           whereHelperfloat64
	Low            whereHelperfloat64
	Close          whereHelperfloat64
	Volume         whereHelperfloat64
	Asset          whereHelperstring
}{
	ID:             whereHelperstring{field: "\"candle\".\"id\""},
	ExchangeNameID: whereHelperstring{field: "\"candle\".\"exchange_name_id\""},
	Base:           whereHelperstring{field: "\"candle\".\"base\""},
	Quote:          whereHelperstring{field: "\"candle\".\"quote\""},
	Interval:       whereHelperint64{field: "\"candle\".\"interval\""},
	Timestamp:      whereHelpertime_Time{field: "\"candle\".\"timestamp\""},
	Open:           whereHelperfloat64{field: "\"candle\".\"open\""},
	High:           whereHelperfloat64{field: "\"candle\".\"high\""},
	Low:            whereHelperfloat64{field: "\"candle\".\"low\""},
	Close:          whereHelperfloat64{field: "\"candle\".\"close\""},
	Volume:         whereHelperfloat64{field: "\"candle\".\"volume\""},
	Asset:          whereHelperstring{field: "\"candle\".\"asset\""},
}
// CandleRels is where relationship names are stored.
// Use these constants with qm.Load to eager-load the relationship.
var CandleRels = struct {
	ExchangeName string
}{
	ExchangeName: "ExchangeName",
}
// candleR is where relationships are stored.
type candleR struct {
	// ExchangeName is the exchange row referenced by Candle.ExchangeNameID.
	ExchangeName *Exchange
}

// NewStruct creates a new relationship struct
func (*candleR) NewStruct() *candleR {
	return &candleR{}
}
// candleL is where Load methods for each relationship are stored.
// It carries no state; see LoadExchangeName below.
type candleL struct{}
var (
	// candleAllColumns lists every column of the candle table.
	candleAllColumns = []string{"id", "exchange_name_id", "base", "quote", "interval", "timestamp", "open", "high", "low", "close", "volume", "asset"}
	// candleColumnsWithoutDefault are columns with no database-side default.
	candleColumnsWithoutDefault = []string{"exchange_name_id", "base", "quote", "interval", "timestamp", "open", "high", "low", "close", "volume", "asset"}
	// candleColumnsWithDefault are columns the database can populate itself.
	candleColumnsWithDefault = []string{"id"}
	// candlePrimaryKeyColumns identifies rows for update/delete/find.
	candlePrimaryKeyColumns = []string{"id"}
)
type (
	// CandleSlice is an alias for a slice of pointers to Candle.
	// This should generally be used opposed to []Candle.
	CandleSlice []*Candle
	// CandleHook is the signature for custom Candle hook methods
	CandleHook func(context.Context, boil.ContextExecutor, *Candle) error

	// candleQuery wraps a query so the finisher methods (One, All, Count,
	// Exists, UpdateAll) bind results to Candle values.
	candleQuery struct {
		*queries.Query
	}
)
// Cache for insert, update and upsert
// Each cache maps a column-set key (see makeCacheKey) to the prebuilt SQL
// and struct-field bindings, guarded by its own RWMutex.
var (
	candleType                 = reflect.TypeOf(&Candle{})
	candleMapping              = queries.MakeStructMapping(candleType)
	candlePrimaryKeyMapping, _ = queries.BindMapping(candleType, candleMapping, candlePrimaryKeyColumns)
	candleInsertCacheMut       sync.RWMutex
	candleInsertCache          = make(map[string]insertCache)
	candleUpdateCacheMut       sync.RWMutex
	candleUpdateCache          = make(map[string]updateCache)
	candleUpsertCacheMut       sync.RWMutex
	candleUpsertCache          = make(map[string]insertCache)
)
// Blank assignments keep the time and qmhelper imports alive even when the
// generated code paths that use them are absent.
var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)
// Registered hook functions, one slice per hook point; populated via
// AddCandleHook and executed in registration order by the do*Hooks methods.
var candleBeforeInsertHooks []CandleHook
var candleBeforeUpdateHooks []CandleHook
var candleBeforeDeleteHooks []CandleHook
var candleBeforeUpsertHooks []CandleHook
var candleAfterInsertHooks []CandleHook
var candleAfterSelectHooks []CandleHook
var candleAfterUpdateHooks []CandleHook
var candleAfterDeleteHooks []CandleHook
var candleAfterUpsertHooks []CandleHook
// doBeforeInsertHooks executes all "before insert" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleBeforeInsertHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doBeforeUpdateHooks executes all "before Update" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleBeforeUpdateHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doBeforeDeleteHooks executes all "before Delete" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleBeforeDeleteHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doBeforeUpsertHooks executes all "before Upsert" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleBeforeUpsertHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterInsertHooks executes all "after Insert" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleAfterInsertHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterSelectHooks executes all "after Select" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleAfterSelectHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterUpdateHooks executes all "after Update" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleAfterUpdateHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterDeleteHooks executes all "after Delete" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleAfterDeleteHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// doAfterUpsertHooks executes all "after Upsert" hooks in registration
// order, stopping at the first error. Hooks are skipped entirely when the
// context says so.
func (o *Candle) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}
	for _, fn := range candleAfterUpsertHooks {
		if err = fn(ctx, exec, o); err != nil {
			return err
		}
	}
	return nil
}
// AddCandleHook registers your hook function for all future operations.
// An unrecognized hook point is silently ignored, matching the generated
// switch-based behavior.
func AddCandleHook(hookPoint boil.HookPoint, candleHook CandleHook) {
	// Map each hook point to the registry slice it appends to.
	registries := map[boil.HookPoint]*[]CandleHook{
		boil.BeforeInsertHook: &candleBeforeInsertHooks,
		boil.BeforeUpdateHook: &candleBeforeUpdateHooks,
		boil.BeforeDeleteHook: &candleBeforeDeleteHooks,
		boil.BeforeUpsertHook: &candleBeforeUpsertHooks,
		boil.AfterInsertHook:  &candleAfterInsertHooks,
		boil.AfterSelectHook:  &candleAfterSelectHooks,
		boil.AfterUpdateHook:  &candleAfterUpdateHooks,
		boil.AfterDeleteHook:  &candleAfterDeleteHooks,
		boil.AfterUpsertHook:  &candleAfterUpsertHooks,
	}
	if registry, ok := registries[hookPoint]; ok {
		*registry = append(*registry, candleHook)
	}
}
// One returns a single candle record from the query, limiting the query to
// one row. sql.ErrNoRows is passed through unwrapped so callers can test
// for it directly.
func (q candleQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Candle, error) {
	candle := &Candle{}
	queries.SetLimit(q.Query, 1)
	if err := q.Bind(ctx, exec, candle); err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "postgres: failed to execute a one query for candle")
	}
	if err := candle.doAfterSelectHooks(ctx, exec); err != nil {
		return candle, err
	}
	return candle, nil
}
// All returns all Candle records from the query, running any registered
// after-select hooks on each record.
func (q candleQuery) All(ctx context.Context, exec boil.ContextExecutor) (CandleSlice, error) {
	var candles []*Candle
	if err := q.Bind(ctx, exec, &candles); err != nil {
		return nil, errors.Wrap(err, "postgres: failed to assign all query results to Candle slice")
	}
	// Skip the per-row loop entirely when no hooks are registered.
	if len(candleAfterSelectHooks) == 0 {
		return candles, nil
	}
	for _, candle := range candles {
		if err := candle.doAfterSelectHooks(ctx, exec); err != nil {
			return candles, err
		}
	}
	return candles, nil
}
// Count returns the count of all Candle records in the query by swapping
// the select list for COUNT(*) and scanning the single result.
func (q candleQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	var total int64
	if err := q.Query.QueryRowContext(ctx, exec).Scan(&total); err != nil {
		return 0, errors.Wrap(err, "postgres: failed to count candle rows")
	}
	return total, nil
}
// Exists checks if the row exists in the table by issuing a LIMIT 1 count
// query and testing whether anything matched.
func (q candleQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	queries.SetLimit(q.Query, 1)
	var total int64
	if err := q.Query.QueryRowContext(ctx, exec).Scan(&total); err != nil {
		return false, errors.Wrap(err, "postgres: failed to check if candle exists")
	}
	return total > 0, nil
}
// ExchangeName builds a query for the exchange row pointed to by the
// foreign key o.ExchangeNameID; extra mods are appended after the key filter.
func (o *Candle) ExchangeName(mods ...qm.QueryMod) exchangeQuery {
	queryMods := append(
		[]qm.QueryMod{qm.Where("\"id\" = ?", o.ExchangeNameID)},
		mods...,
	)
	query := Exchanges(queryMods...)
	queries.SetFrom(query.Query, "\"exchange\"")
	return query
}
// LoadExchangeName allows an eager lookup of values, cached into the
// loaded structs of the objects. This is for an N-1 relationship.
func (candleL) LoadExchangeName(ctx context.Context, e boil.ContextExecutor, singular bool, maybeCandle interface{}, mods queries.Applicator) error {
	// maybeCandle is either a single *Candle or a *[]*Candle depending on singular.
	var slice []*Candle
	var object *Candle
	if singular {
		object = maybeCandle.(*Candle)
	} else {
		slice = *maybeCandle.(*[]*Candle)
	}
	// Collect the distinct foreign-key values to query, initializing each
	// object's relationship container (R) along the way.
	args := make([]interface{}, 0, 1)
	if singular {
		if object.R == nil {
			object.R = &candleR{}
		}
		args = append(args, object.ExchangeNameID)
	} else {
	Outer:
		for _, obj := range slice {
			if obj.R == nil {
				obj.R = &candleR{}
			}
			// Deduplicate: skip IDs already queued.
			for _, a := range args {
				if a == obj.ExchangeNameID {
					continue Outer
				}
			}
			args = append(args, obj.ExchangeNameID)
		}
	}
	// Nothing to load.
	if len(args) == 0 {
		return nil
	}
	query := NewQuery(qm.From(`exchange`), qm.WhereIn(`exchange.id in ?`, args...))
	if mods != nil {
		mods.Apply(query)
	}
	results, err := query.QueryContext(ctx, e)
	if err != nil {
		return errors.Wrap(err, "failed to eager load Exchange")
	}
	var resultSlice []*Exchange
	if err = queries.Bind(results, &resultSlice); err != nil {
		return errors.Wrap(err, "failed to bind eager loaded slice Exchange")
	}
	if err = results.Close(); err != nil {
		return errors.Wrap(err, "failed to close results of eager load for exchange")
	}
	if err = results.Err(); err != nil {
		return errors.Wrap(err, "error occurred during iteration of eager loaded relations for exchange")
	}
	// Run after-select hooks on the freshly loaded exchange rows.
	if len(candleAfterSelectHooks) != 0 {
		for _, obj := range resultSlice {
			if err := obj.doAfterSelectHooks(ctx, e); err != nil {
				return err
			}
		}
	}
	if len(resultSlice) == 0 {
		return nil
	}
	// Wire both directions of the relationship: candle.R.ExchangeName and
	// exchange.R.ExchangeNameCandles.
	if singular {
		foreign := resultSlice[0]
		object.R.ExchangeName = foreign
		if foreign.R == nil {
			foreign.R = &exchangeR{}
		}
		foreign.R.ExchangeNameCandles = append(foreign.R.ExchangeNameCandles, object)
		return nil
	}
	// Plural case: match each candle to its exchange by foreign key.
	for _, local := range slice {
		for _, foreign := range resultSlice {
			if local.ExchangeNameID == foreign.ID {
				local.R.ExchangeName = foreign
				if foreign.R == nil {
					foreign.R = &exchangeR{}
				}
				foreign.R.ExchangeNameCandles = append(foreign.R.ExchangeNameCandles, local)
				break
			}
		}
	}
	return nil
}
// SetExchangeName of the candle to the related item.
// Sets o.R.ExchangeName to related.
// Adds o to related.R.ExchangeNameCandles.
func (o *Candle) SetExchangeName(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Exchange) error {
	var err error
	// Optionally insert the related exchange first so its ID exists.
	if insert {
		if err = related.Insert(ctx, exec, boil.Infer()); err != nil {
			return errors.Wrap(err, "failed to insert into foreign table")
		}
	}
	// Persist the new foreign key on this candle row.
	updateQuery := fmt.Sprintf(
		"UPDATE \"candle\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 1, []string{"exchange_name_id"}),
		strmangle.WhereClause("\"", "\"", 2, candlePrimaryKeyColumns),
	)
	values := []interface{}{related.ID, o.ID}
	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, updateQuery)
		fmt.Fprintln(boil.DebugWriter, values)
	}
	if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil {
		return errors.Wrap(err, "failed to update local table")
	}
	// Only after the database update succeeds, update the in-memory value
	// and both sides of the cached relationship.
	o.ExchangeNameID = related.ID
	if o.R == nil {
		o.R = &candleR{
			ExchangeName: related,
		}
	} else {
		o.R.ExchangeName = related
	}
	if related.R == nil {
		related.R = &exchangeR{
			ExchangeNameCandles: CandleSlice{o},
		}
	} else {
		related.R.ExchangeNameCandles = append(related.R.ExchangeNameCandles, o)
	}
	return nil
}
// Candles retrieves all the records using an executor, appending the
// candle table FROM clause after any caller-supplied mods.
func Candles(mods ...qm.QueryMod) candleQuery {
	return candleQuery{NewQuery(append(mods, qm.From("\"candle\""))...)}
}
// FindCandle retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
// sql.ErrNoRows is passed through unwrapped so callers can test for it.
func FindCandle(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*Candle, error) {
	sel := "*"
	if len(selectCols) > 0 {
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf("select %s from \"candle\" where \"id\"=$1", sel)
	candleObj := &Candle{}
	if err := queries.Raw(query, iD).Bind(ctx, exec, candleObj); err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "postgres: unable to select from candle")
	}
	return candleObj, nil
}
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Candle) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("postgres: no candle provided for insertion")
	}
	var err error
	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}
	// Columns with defaults that hold non-zero values become part of the
	// cache key, since they change the generated column list.
	nzDefaults := queries.NonZeroDefaultSet(candleColumnsWithDefault, o)
	key := makeCacheKey(columns, nzDefaults)
	candleInsertCacheMut.RLock()
	cache, cached := candleInsertCache[key]
	candleInsertCacheMut.RUnlock()
	if !cached {
		// Build the insert column list (wl) and RETURNING list once per key.
		wl, returnColumns := columns.InsertColumnSet(
			candleAllColumns,
			candleColumnsWithDefault,
			candleColumnsWithoutDefault,
			nzDefaults,
		)
		cache.valueMapping, err = queries.BindMapping(candleType, candleMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(candleType, candleMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"candle\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			// Every column has a default: insert an all-defaults row.
			cache.query = "INSERT INTO \"candle\" %sDEFAULT VALUES%s"
		}
		var queryOutput, queryReturning string
		if len(cache.retMapping) != 0 {
			queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\""))
		}
		// Splice the (possibly empty) output/returning fragments into the template.
		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}
	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)
	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.query)
		fmt.Fprintln(boil.DebugWriter, vals)
	}
	// With a RETURNING clause, scan the database-generated values back into o.
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}
	if err != nil {
		return errors.Wrap(err, "postgres: unable to insert into candle")
	}
	// Only cache after a successful execution.
	if !cached {
		candleInsertCacheMut.Lock()
		candleInsertCache[key] = cache
		candleInsertCacheMut.Unlock()
	}
	return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the Candle.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Candle) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	// Cache the generated UPDATE statement per column set.
	key := makeCacheKey(columns, nil)
	candleUpdateCacheMut.RLock()
	cache, cached := candleUpdateCache[key]
	candleUpdateCacheMut.RUnlock()
	if !cached {
		wl := columns.UpdateColumnSet(
			candleAllColumns,
			candlePrimaryKeyColumns,
		)
		if len(wl) == 0 {
			return 0, errors.New("postgres: unable to update candle, could not build whitelist")
		}
		cache.query = fmt.Sprintf("UPDATE \"candle\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 1, wl),
			strmangle.WhereClause("\"", "\"", len(wl)+1, candlePrimaryKeyColumns),
		)
		// Values are bound as SET params followed by primary-key params.
		cache.valueMapping, err = queries.BindMapping(candleType, candleMapping, append(wl, candlePrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}
	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)
	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.query)
		fmt.Fprintln(boil.DebugWriter, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "postgres: unable to update candle row")
	}
	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "postgres: failed to get rows affected by update for candle")
	}
	// Only cache after a successful execution.
	if !cached {
		candleUpdateCacheMut.Lock()
		candleUpdateCache[key] = cache
		candleUpdateCacheMut.Unlock()
	}
	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
func (q candleQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)
	res, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "postgres: unable to update all for candle")
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "postgres: unable to retrieve rows affected for candle")
	}
	return affected, nil
}
// UpdateAll updates all rows with the specified column values, using an executor.
// All rows in the slice receive the same column values in a single statement.
func (o CandleSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}
	if len(cols) == 0 {
		return 0, errors.New("postgres: update all requires at least one column argument")
	}
	// SET params come first in the arg list, in map iteration order,
	// matching the order used to build colNames below.
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))
	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}
	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), candlePrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}
	// NOTE: this local shadows the database/sql package name; the package is
	// not referenced below, so it is harmless.
	sql := fmt.Sprintf("UPDATE \"candle\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 1, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, candlePrimaryKeyColumns, len(o)))
	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		fmt.Fprintln(boil.DebugWriter, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "postgres: unable to update all in candle slice")
	}
	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "postgres: unable to retrieve rows affected all in update all candle")
	}
	return rowsAff, nil
}
// Upsert attempts an insert using an executor, and does an update or ignore on conflict.
// See boil.Columns documentation for how to properly use updateColumns and insertColumns.
func (o *Candle) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error {
	if o == nil {
		return errors.New("postgres: no candle provided for upsert")
	}
	if err := o.doBeforeUpsertHooks(ctx, exec); err != nil {
		return err
	}
	// Columns with DB defaults that currently hold non-zero values must be
	// included in the insert set, otherwise their values would be lost.
	nzDefaults := queries.NonZeroDefaultSet(candleColumnsWithDefault, o)
	// Build cache key in-line uglily - mysql vs psql problems
	// The key encodes every input that affects the generated SQL
	// (conflict flag, conflict/update/insert column sets, non-zero defaults)
	// so identical calls can reuse a previously built query and mapping.
	buf := strmangle.GetBuffer()
	if updateOnConflict {
		buf.WriteByte('t')
	} else {
		buf.WriteByte('f')
	}
	buf.WriteByte('.')
	for _, c := range conflictColumns {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(updateColumns.Kind))
	for _, c := range updateColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	buf.WriteString(strconv.Itoa(insertColumns.Kind))
	for _, c := range insertColumns.Cols {
		buf.WriteString(c)
	}
	buf.WriteByte('.')
	for _, c := range nzDefaults {
		buf.WriteString(c)
	}
	key := buf.String()
	strmangle.PutBuffer(buf)
	candleUpsertCacheMut.RLock()
	cache, cached := candleUpsertCache[key]
	candleUpsertCacheMut.RUnlock()
	var err error
	if !cached {
		// Cache miss: resolve the concrete insert/return/update column sets
		// and build the upsert statement plus its value/return mappings.
		insert, ret := insertColumns.InsertColumnSet(
			candleAllColumns,
			candleColumnsWithDefault,
			candleColumnsWithoutDefault,
			nzDefaults,
		)
		update := updateColumns.UpdateColumnSet(
			candleAllColumns,
			candlePrimaryKeyColumns,
		)
		if updateOnConflict && len(update) == 0 {
			return errors.New("postgres: unable to upsert candle, could not build update column list")
		}
		// Default the conflict target to the primary key columns when the
		// caller did not specify any.
		conflict := conflictColumns
		if len(conflict) == 0 {
			conflict = make([]string, len(candlePrimaryKeyColumns))
			copy(conflict, candlePrimaryKeyColumns)
		}
		cache.query = buildUpsertQueryPostgres(dialect, "\"candle\"", updateOnConflict, ret, update, conflict, insert)
		cache.valueMapping, err = queries.BindMapping(candleType, candleMapping, insert)
		if err != nil {
			return err
		}
		if len(ret) != 0 {
			cache.retMapping, err = queries.BindMapping(candleType, candleMapping, ret)
			if err != nil {
				return err
			}
		}
	}
	// Extract the bound values from the struct for the statement parameters.
	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)
	var returns []interface{}
	if len(cache.retMapping) != 0 {
		returns = queries.PtrsFromMapping(value, cache.retMapping)
	}
	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, cache.query)
		fmt.Fprintln(boil.DebugWriter, vals)
	}
	// With a RETURNING clause, scan the returned columns back into the struct;
	// otherwise just execute the statement.
	if len(cache.retMapping) != 0 {
		err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...)
		if err == sql.ErrNoRows {
			err = nil // Postgres doesn't return anything when there's no update
		}
	} else {
		_, err = exec.ExecContext(ctx, cache.query, vals...)
	}
	if err != nil {
		return errors.Wrap(err, "postgres: unable to upsert candle")
	}
	// Store the freshly built query/mappings for future identical calls.
	if !cached {
		candleUpsertCacheMut.Lock()
		candleUpsertCache[key] = cache
		candleUpsertCacheMut.Unlock()
	}
	return o.doAfterUpsertHooks(ctx, exec)
}
// Delete deletes a single Candle record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *Candle) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("postgres: no Candle provided for delete")
	}

	// Fire any registered before-delete hooks first.
	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), candlePrimaryKeyMapping)
	query := "DELETE FROM \"candle\" WHERE \"id\"=$1"

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, query)
		fmt.Fprintln(boil.DebugWriter, pkeyArgs...)
	}

	result, err := exec.ExecContext(ctx, query, pkeyArgs...)
	if err != nil {
		return 0, errors.Wrap(err, "postgres: unable to delete from candle")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "postgres: failed to get rows affected by delete for candle")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}
	return rowsAff, nil
}
// DeleteAll deletes all matching rows.
func (q candleQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("postgres: no candleQuery provided for delete all")
	}

	// Rewrite the underlying query into a DELETE statement before running it.
	queries.SetDelete(q.Query)

	res, execErr := q.Query.ExecContext(ctx, exec)
	if execErr != nil {
		return 0, errors.Wrap(execErr, "postgres: unable to delete all from candle")
	}

	affected, raErr := res.RowsAffected()
	if raErr != nil {
		return 0, errors.Wrap(raErr, "postgres: failed to get rows affected by deleteall for candle")
	}
	return affected, nil
}
// DeleteAll deletes all rows in the slice, using an executor.
// Before/after delete hooks run for every object in the slice; all rows are
// removed with a single DELETE statement matching on primary keys.
func (o CandleSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	// Run before-delete hooks for every row prior to issuing the statement.
	if len(candleBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Gather the primary key values of every row so a single repeated
	// WHERE clause can match them all.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), candlePrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"candle\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, candlePrimaryKeyColumns, len(o))

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, sql)
		// Spread args for consistency with the debug output of the other
		// query methods in this file (previously printed as a single slice).
		fmt.Fprintln(boil.DebugWriter, args...)
	}

	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "postgres: unable to delete all from candle slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "postgres: failed to get rows affected by deleteall for candle")
	}

	// Run after-delete hooks once the statement has succeeded.
	if len(candleAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}
	return rowsAff, nil
}
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *Candle) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	fresh, err := FindCandle(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	// Overwrite the receiver with the freshly loaded row.
	*o = *fresh
	return nil
}
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *CandleSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	// Collect primary key values for every row in the slice.
	var args []interface{}
	for _, obj := range *o {
		args = append(args, queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), candlePrimaryKeyMapping)...)
	}

	sql := "SELECT \"candle\".* FROM \"candle\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, candlePrimaryKeyColumns, len(*o))

	reloaded := CandleSlice{}
	if err := queries.Raw(sql, args...).Bind(ctx, exec, &reloaded); err != nil {
		return errors.Wrap(err, "postgres: unable to reload all in CandleSlice")
	}

	// Replace the slice contents with the freshly fetched rows.
	*o = reloaded
	return nil
}
// CandleExists checks if the Candle row exists.
func CandleExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
	query := "select exists(select 1 from \"candle\" where \"id\"=$1 limit 1)"

	if boil.DebugMode {
		fmt.Fprintln(boil.DebugWriter, query)
		fmt.Fprintln(boil.DebugWriter, iD)
	}

	var exists bool
	if err := exec.QueryRowContext(ctx, query, iD).Scan(&exists); err != nil {
		return false, errors.Wrap(err, "postgres: unable to check if candle exists")
	}
	return exists, nil
}