Mirror of https://github.com/grafana/grafana.git, synced 2025-08-01 02:21:50 +08:00
@@ -1,284 +0,0 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xorm

import (
	"container/list"
	"fmt"
	"sync"
	"time"

	"xorm.io/core"
)

// LRUCacher implements cache object facilities
type LRUCacher struct {
	idList         *list.List
	sqlList        *list.List
	idIndex        map[string]map[string]*list.Element
	sqlIndex       map[string]map[string]*list.Element
	store          core.CacheStore
	mutex          sync.Mutex
	MaxElementSize int
	Expired        time.Duration
	GcInterval     time.Duration
}

// NewLRUCacher creates a cacher
func NewLRUCacher(store core.CacheStore, maxElementSize int) *LRUCacher {
	return NewLRUCacher2(store, 3600*time.Second, maxElementSize)
}

// NewLRUCacher2 creates a cacher with a custom expiry and max element size
func NewLRUCacher2(store core.CacheStore, expired time.Duration, maxElementSize int) *LRUCacher {
	cacher := &LRUCacher{store: store, idList: list.New(),
		sqlList: list.New(), Expired: expired,
		GcInterval: core.CacheGcInterval, MaxElementSize: maxElementSize,
		sqlIndex: make(map[string]map[string]*list.Element),
		idIndex:  make(map[string]map[string]*list.Element),
	}
	cacher.RunGC()
	return cacher
}

// RunGC schedules GC to run once every m.GcInterval
func (m *LRUCacher) RunGC() {
	time.AfterFunc(m.GcInterval, func() {
		m.RunGC()
		m.GC()
	})
}

// GC checks the id list and the sql list and removes all expired elements
func (m *LRUCacher) GC() {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	var removedNum int
	for e := m.idList.Front(); e != nil; {
		if removedNum <= core.CacheGcMaxRemoved &&
			time.Now().Sub(e.Value.(*idNode).lastVisit) > m.Expired {
			removedNum++
			next := e.Next()
			node := e.Value.(*idNode)
			m.delBean(node.tbName, node.id)
			e = next
		} else {
			break
		}
	}

	removedNum = 0
	for e := m.sqlList.Front(); e != nil; {
		if removedNum <= core.CacheGcMaxRemoved &&
			time.Now().Sub(e.Value.(*sqlNode).lastVisit) > m.Expired {
			removedNum++
			next := e.Next()
			node := e.Value.(*sqlNode)
			m.delIds(node.tbName, node.sql)
			e = next
		} else {
			break
		}
	}
}

// GetIds returns all bean ids for the given sql and parameters from cache
func (m *LRUCacher) GetIds(tableName, sql string) interface{} {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.sqlIndex[tableName]; !ok {
		m.sqlIndex[tableName] = make(map[string]*list.Element)
	}
	if v, err := m.store.Get(sql); err == nil {
		if el, ok := m.sqlIndex[tableName][sql]; !ok {
			el = m.sqlList.PushBack(newSQLNode(tableName, sql))
			m.sqlIndex[tableName][sql] = el
		} else {
			lastTime := el.Value.(*sqlNode).lastVisit
			// if expired, remove the node and return nil
			if time.Now().Sub(lastTime) > m.Expired {
				m.delIds(tableName, sql)
				return nil
			}
			m.sqlList.MoveToBack(el)
			el.Value.(*sqlNode).lastVisit = time.Now()
		}
		return v
	}

	m.delIds(tableName, sql)
	return nil
}

// GetBean returns the bean for the given tableName and id from cache
func (m *LRUCacher) GetBean(tableName string, id string) interface{} {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.idIndex[tableName]; !ok {
		m.idIndex[tableName] = make(map[string]*list.Element)
	}
	tid := genID(tableName, id)
	if v, err := m.store.Get(tid); err == nil {
		if el, ok := m.idIndex[tableName][id]; ok {
			lastTime := el.Value.(*idNode).lastVisit
			// if expired, remove the node and return nil
			if time.Now().Sub(lastTime) > m.Expired {
				m.delBean(tableName, id)
				return nil
			}
			m.idList.MoveToBack(el)
			el.Value.(*idNode).lastVisit = time.Now()
		} else {
			el = m.idList.PushBack(newIDNode(tableName, id))
			m.idIndex[tableName][id] = el
		}
		return v
	}

	// the bean does not exist in the store, so remove the in-memory index
	m.delBean(tableName, id)
	return nil
}

// clearIds clears all sql-to-ids mappings for table tableName from cache
func (m *LRUCacher) clearIds(tableName string) {
	if tis, ok := m.sqlIndex[tableName]; ok {
		for sql, v := range tis {
			m.sqlList.Remove(v)
			m.store.Del(sql)
		}
	}
	m.sqlIndex[tableName] = make(map[string]*list.Element)
}

// ClearIds clears all sql-to-ids mappings for table tableName from cache
func (m *LRUCacher) ClearIds(tableName string) {
	m.mutex.Lock()
	m.clearIds(tableName)
	m.mutex.Unlock()
}

func (m *LRUCacher) clearBeans(tableName string) {
	if tis, ok := m.idIndex[tableName]; ok {
		for id, v := range tis {
			m.idList.Remove(v)
			tid := genID(tableName, id)
			m.store.Del(tid)
		}
	}
	m.idIndex[tableName] = make(map[string]*list.Element)
}

// ClearBeans clears all beans of the given table
func (m *LRUCacher) ClearBeans(tableName string) {
	m.mutex.Lock()
	m.clearBeans(tableName)
	m.mutex.Unlock()
}

// PutIds puts ids into the table's cache
func (m *LRUCacher) PutIds(tableName, sql string, ids interface{}) {
	m.mutex.Lock()
	if _, ok := m.sqlIndex[tableName]; !ok {
		m.sqlIndex[tableName] = make(map[string]*list.Element)
	}
	if el, ok := m.sqlIndex[tableName][sql]; !ok {
		el = m.sqlList.PushBack(newSQLNode(tableName, sql))
		m.sqlIndex[tableName][sql] = el
	} else {
		el.Value.(*sqlNode).lastVisit = time.Now()
	}
	m.store.Put(sql, ids)
	if m.sqlList.Len() > m.MaxElementSize {
		e := m.sqlList.Front()
		node := e.Value.(*sqlNode)
		m.delIds(node.tbName, node.sql)
	}
	m.mutex.Unlock()
}

// PutBean puts a bean into the table's cache
func (m *LRUCacher) PutBean(tableName string, id string, obj interface{}) {
	m.mutex.Lock()
	var el *list.Element
	var ok bool

	if el, ok = m.idIndex[tableName][id]; !ok {
		el = m.idList.PushBack(newIDNode(tableName, id))
		m.idIndex[tableName][id] = el
	} else {
		el.Value.(*idNode).lastVisit = time.Now()
	}

	m.store.Put(genID(tableName, id), obj)
	if m.idList.Len() > m.MaxElementSize {
		e := m.idList.Front()
		node := e.Value.(*idNode)
		m.delBean(node.tbName, node.id)
	}
	m.mutex.Unlock()
}

func (m *LRUCacher) delIds(tableName, sql string) {
	if _, ok := m.sqlIndex[tableName]; ok {
		if el, ok := m.sqlIndex[tableName][sql]; ok {
			delete(m.sqlIndex[tableName], sql)
			m.sqlList.Remove(el)
		}
	}
	m.store.Del(sql)
}

// DelIds deletes ids
func (m *LRUCacher) DelIds(tableName, sql string) {
	m.mutex.Lock()
	m.delIds(tableName, sql)
	m.mutex.Unlock()
}

func (m *LRUCacher) delBean(tableName string, id string) {
	tid := genID(tableName, id)
	if el, ok := m.idIndex[tableName][id]; ok {
		delete(m.idIndex[tableName], id)
		m.idList.Remove(el)
		m.clearIds(tableName)
	}
	m.store.Del(tid)
}

// DelBean deletes a single bean of the given table by id
func (m *LRUCacher) DelBean(tableName string, id string) {
	m.mutex.Lock()
	m.delBean(tableName, id)
	m.mutex.Unlock()
}

type idNode struct {
	tbName    string
	id        string
	lastVisit time.Time
}

type sqlNode struct {
	tbName    string
	sql       string
	lastVisit time.Time
}

func genSQLKey(sql string, args interface{}) string {
	return fmt.Sprintf("%v-%v", sql, args)
}

func genID(prefix string, id string) string {
	return fmt.Sprintf("%v-%v", prefix, id)
}

func newIDNode(tbName string, id string) *idNode {
	return &idNode{tbName, id, time.Now()}
}

func newSQLNode(tbName, sql string) *sqlNode {
	return &sqlNode{tbName, sql, time.Now()}
}
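For orientation, here is a minimal usage sketch of the cacher removed above, using only the APIs defined in this file and in the memory store below. The example function, the "user" table name, the SQL key and the cached values are illustrative assumptions, not part of the original code.

package xorm

import "time"

// exampleLRUCacherUsage is a hypothetical helper showing the intended call pattern.
func exampleLRUCacherUsage() {
	store := NewMemoryStore()                        // core.CacheStore backed by a plain map
	cacher := NewLRUCacher2(store, time.Hour, 10000) // 1h expiry, at most 10000 elements per list

	// Cache the primary-key list produced by a query, keyed by its SQL text.
	cacher.PutIds("user", "SELECT id FROM user WHERE name=?-[alice]", []int64{1, 2})
	_ = cacher.GetIds("user", "SELECT id FROM user WHERE name=?-[alice]") // nil on miss or expiry

	// Cache a single bean under table name + id; values round-trip as interface{}.
	cacher.PutBean("user", "1", map[string]interface{}{"id": 1, "name": "alice"})
	if v := cacher.GetBean("user", "1"); v != nil {
		_ = v // callers type-assert to the concrete bean type
	}

	cacher.ClearIds("user")   // invalidate every cached id list for the table
	cacher.ClearBeans("user") // invalidate every cached bean for the table
}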
@@ -1,51 +0,0 @@
// Copyright 2015 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xorm

import (
	"sync"

	"xorm.io/core"
)

var _ core.CacheStore = NewMemoryStore()

// MemoryStore represents an in-memory store
type MemoryStore struct {
	store map[interface{}]interface{}
	mutex sync.RWMutex
}

// NewMemoryStore creates a new store in memory
func NewMemoryStore() *MemoryStore {
	return &MemoryStore{store: make(map[interface{}]interface{})}
}

// Put puts an object into the store
func (s *MemoryStore) Put(key string, value interface{}) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.store[key] = value
	return nil
}

// Get gets an object from the store
func (s *MemoryStore) Get(key string) (interface{}, error) {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	if v, ok := s.store[key]; ok {
		return v, nil
	}

	return nil, ErrNotExist
}

// Del deletes an object
func (s *MemoryStore) Del(key string) error {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	delete(s.store, key)
	return nil
}
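The store contract is small: Put never fails, Get signals a miss with ErrNotExist, Del is idempotent. A hedged sketch of the miss path, in the same package as above, with the key names purely illustrative:

func exampleMemoryStoreMiss() (interface{}, error) {
	s := NewMemoryStore()
	if err := s.Put("answer", 42); err != nil { // the in-memory Put cannot fail
		return nil, err
	}
	if _, err := s.Get("missing"); err == ErrNotExist { // absent key: treat as a cache miss
		return s.Get("answer") // present key: value comes back as interface{}
	}
	return nil, nil
}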
@@ -1,30 +0,0 @@
// Copyright 2018 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xorm

// ContextCache is the interface that operates the cache data.
type ContextCache interface {
	// Put puts value into cache with key.
	Put(key string, val interface{})
	// Get gets cached value by given key.
	Get(key string) interface{}
}

type memoryContextCache map[string]interface{}

// NewMemoryContextCache returns a memoryContextCache
func NewMemoryContextCache() memoryContextCache {
	return make(map[string]interface{})
}

// Put puts value into cache with key.
func (m memoryContextCache) Put(key string, val interface{}) {
	m[key] = val
}

// Get gets cached value by given key.
func (m memoryContextCache) Get(key string) interface{} {
	return m[key]
}
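A minimal sketch of how a context cache would typically be attached to a session, assuming the Session.ContextCache method that appears later in this diff and package xorm as the surrounding context; the User struct, the session calls and the id value are illustrative only.

func exampleContextCacheUsage(engine *Engine) error {
	// Illustrative bean; any mapped struct would do.
	type User struct {
		Id   int64
		Name string
	}

	ctxCache := NewMemoryContextCache()
	sess := engine.NewSession()
	defer sess.Close()

	var first, second User
	// First Get hits the database and stores the bean under a "<sql>-<args>" key.
	if _, err := sess.ContextCache(ctxCache).Where("id = ?", 1).Get(&first); err != nil {
		return err
	}
	// Same SQL and args with the same context cache: served from memory.
	_, err := sess.ContextCache(ctxCache).Where("id = ?", 1).Get(&second)
	return err
}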
@@ -45,34 +45,11 @@ type Engine struct {
	TZLocation *time.Location // The timezone of the application
	DatabaseTZ *time.Location // The timezone of the database

	disableGlobalCache bool

	tagHandlers map[string]tagHandler

	cachers    map[string]core.Cacher
	cacherLock sync.RWMutex

	defaultContext context.Context
}

func (engine *Engine) setCacher(tableName string, cacher core.Cacher) {
	engine.cacherLock.Lock()
	engine.cachers[tableName] = cacher
	engine.cacherLock.Unlock()
}

func (engine *Engine) getCacher(tableName string) core.Cacher {
	var cacher core.Cacher
	var ok bool
	engine.cacherLock.RLock()
	cacher, ok = engine.cachers[tableName]
	engine.cacherLock.RUnlock()
	if !ok && !engine.disableGlobalCache {
		cacher = engine.Cacher
	}
	return cacher
}

// BufferSize sets buffer size for iterate
func (engine *Engine) BufferSize(size int) *Session {
	session := engine.NewSession()
@@ -270,11 +247,6 @@ func (engine *Engine) SetMaxIdleConns(conns int) {
	engine.db.SetMaxIdleConns(conns)
}

// SetDefaultCacher sets the default cacher. Xorm does not enable any cacher by default.
func (engine *Engine) SetDefaultCacher(cacher core.Cacher) {
	engine.Cacher = cacher
}

// NoCache If you have set a default cacher and want to temporarily stop using it,
// you can use NoCache()
func (engine *Engine) NoCache() *Session {
@@ -715,7 +687,6 @@ func (engine *Engine) mapType(v reflect.Value) (*core.Table, error) {
	table.Name = getTableName(engine.TableMapper, v)

	var idFieldColName string
	var hasCacheTag, hasNoCacheTag bool

	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag
@@ -809,13 +780,6 @@ func (engine *Engine) mapType(v reflect.Value) (*core.Table, error) {
				col.Name = key
			}
		}

		if ctx.hasCacheTag {
			hasCacheTag = true
		}
		if ctx.hasNoCacheTag {
			hasNoCacheTag = true
		}
	}

	if col.SQLType.Name == "" {
@@ -879,20 +843,6 @@ func (engine *Engine) mapType(v reflect.Value) (*core.Table, error) {
		table.AutoIncrement = col.Name
	}

	if hasCacheTag {
		if engine.Cacher != nil { // !nash! use engine's cacher if provided
			engine.logger.Info("enable cache on table:", table.Name)
			engine.setCacher(table.Name, engine.Cacher)
		} else {
			engine.logger.Info("enable LRU cache on table:", table.Name)
			engine.setCacher(table.Name, NewLRUCacher2(NewMemoryStore(), time.Hour, 10000))
		}
	}
	if hasNoCacheTag {
		engine.logger.Info("disable cache on table:", table.Name)
		engine.setCacher(table.Name, nil)
	}

	return table, nil
}

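For context, a sketch of how the default cacher removed above would be enabled from application code, assuming package xorm with "time" imported; the driver name and DSN are placeholders, and the sizes mirror the defaults used for the `cache` tag in mapType.

func exampleEnableDefaultCache() (*Engine, error) {
	engine, err := NewEngine("sqlite3", "./app.db") // driver/DSN are illustrative
	if err != nil {
		return nil, err
	}
	// One LRU cacher shared by all tables that do not opt out via the `nocache` tag.
	engine.SetDefaultCacher(NewLRUCacher2(NewMemoryStore(), time.Hour, 10000))
	return engine, nil
}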
@@ -1,21 +0,0 @@
// Copyright 2017 The Xorm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xorm

import "time"

const (
	zeroTime0 = "0000-00-00 00:00:00"
	zeroTime1 = "0001-01-01 00:00:00"
)

func formatTime(t time.Time) string {
	return t.Format("2006-01-02 15:04:05")
}

func isTimeZero(t time.Time) bool {
	return t.IsZero() || formatTime(t) == zeroTime0 ||
		formatTime(t) == zeroTime1
}
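A quick sketch of what the helper treats as "zero", assuming the same package with "time" imported; the sample values are chosen purely for illustration.

func exampleIsTimeZero() bool {
	var unset time.Time // Go's zero value, i.e. 0001-01-01 00:00:00 UTC
	jan1 := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
	// Both Go's zero value and the "0001-01-01 00:00:00" sentinel count as zero.
	return isTimeZero(unset) && isTimeZero(jan1) && !isTimeZero(time.Now())
}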
@@ -87,7 +87,6 @@ type EngineInterface interface {
	Quote(string) string
	SetConnMaxLifetime(time.Duration)
	SetColumnMapper(core.IMapper)
	SetDefaultCacher(core.Cacher)
	SetLogger(logger core.ILogger)
	SetMapper(core.IMapper)
	SetMaxOpenConns(int)

@ -17,12 +17,6 @@ import (
|
||||
"xorm.io/core"
|
||||
)
|
||||
|
||||
type sessionType int
|
||||
|
||||
const (
|
||||
engineSession sessionType = iota
|
||||
)
|
||||
|
||||
// Session keep a pointer to sql.DB and provides all execution of all
|
||||
// kind of database operations.
|
||||
type Session struct {
|
||||
@ -53,19 +47,11 @@ type Session struct {
|
||||
stmtCache map[uint32]*core.Stmt //key: hash.Hash32 of (queryStr, len(queryStr))
|
||||
|
||||
// !evalphobia! stored the last executed query on this session
|
||||
//beforeSQLExec func(string, ...interface{})
|
||||
lastSQL string
|
||||
lastSQLArgs []interface{}
|
||||
showSQL bool
|
||||
|
||||
ctx context.Context
|
||||
sessionType sessionType
|
||||
}
|
||||
|
||||
// Clone copy all the session's content and return a new session
|
||||
func (session *Session) Clone() *Session {
|
||||
var sess = *session
|
||||
return &sess
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// Init reset the session as the init status.
|
||||
@ -113,12 +99,6 @@ func (session *Session) Close() {
|
||||
}
|
||||
}
|
||||
|
||||
// ContextCache enable context cache or not
|
||||
func (session *Session) ContextCache(context ContextCache) *Session {
|
||||
session.statement.context = context
|
||||
return session
|
||||
}
|
||||
|
||||
// IsClosed returns if session is closed
|
||||
func (session *Session) IsClosed() bool {
|
||||
return session.db == nil
|
||||
@ -277,19 +257,6 @@ func cleanupProcessorsClosures(slices *[]func(interface{})) {
|
||||
}
|
||||
}
|
||||
|
||||
func (session *Session) canCache() bool {
|
||||
if session.statement.RefTable == nil ||
|
||||
session.statement.JoinStr != "" ||
|
||||
session.statement.RawSQL != "" ||
|
||||
!session.statement.UseCache ||
|
||||
session.statement.IsForUpdate ||
|
||||
session.tx != nil ||
|
||||
len(session.statement.selectStr) > 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, err error) {
|
||||
crc := crc32.ChecksumIEEE([]byte(sqlStr))
|
||||
// TODO try hash(sqlStr+len(sqlStr))
|
||||
|
@@ -17,6 +17,11 @@ import (
	"xorm.io/core"
)

const (
	zeroTime0 = "0000-00-00 00:00:00"
	zeroTime1 = "0001-01-01 00:00:00"
)

func (session *Session) str2Time(col *core.Column, data string) (outTime time.Time, outErr error) {
	sdata := strings.TrimSpace(data)
	var x time.Time
@@ -627,7 +632,7 @@ func (session *Session) value2Interface(col *core.Column, fieldValue reflect.Val
		}
		return bytes, nil
	}
	return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type())
	return nil, fmt.Errorf("unsupported type %v", fieldValue.Type())
	case reflect.Complex64, reflect.Complex128:
		bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface())
		if err != nil {

@ -5,74 +5,11 @@
|
||||
package xorm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"xorm.io/core"
|
||||
)
|
||||
|
||||
func (session *Session) cacheDelete(table *core.Table, tableName, sqlStr string, args ...interface{}) error {
|
||||
if table == nil ||
|
||||
session.tx != nil {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
|
||||
for _, filter := range session.engine.dialect.Filters() {
|
||||
sqlStr = filter.Do(sqlStr, session.engine.dialect, table)
|
||||
}
|
||||
|
||||
newsql := session.statement.convertIDSQL(sqlStr)
|
||||
if newsql == "" {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
|
||||
cacher := session.engine.getCacher(tableName)
|
||||
pkColumns := table.PKColumns()
|
||||
ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
|
||||
if err != nil {
|
||||
resultsSlice, err := session.queryBytes(newsql, args...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ids = make([]core.PK, 0)
|
||||
if len(resultsSlice) > 0 {
|
||||
for _, data := range resultsSlice {
|
||||
var id int64
|
||||
var pk core.PK = make([]interface{}, 0)
|
||||
for _, col := range pkColumns {
|
||||
if v, ok := data[col.Name]; !ok {
|
||||
return errors.New("no id")
|
||||
} else if col.SQLType.IsText() {
|
||||
pk = append(pk, string(v))
|
||||
} else if col.SQLType.IsNumeric() {
|
||||
id, err = strconv.ParseInt(string(v), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pk = append(pk, id)
|
||||
} else {
|
||||
return errors.New("not supported primary key type")
|
||||
}
|
||||
}
|
||||
ids = append(ids, pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, id := range ids {
|
||||
session.engine.logger.Debug("[cacheDelete] delete cache obj:", tableName, id)
|
||||
sid, err := id.ToString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cacher.DelBean(tableName, sid)
|
||||
}
|
||||
session.engine.logger.Debug("[cacheDelete] clear cache table:", tableName)
|
||||
cacher.ClearIds(tableName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete records, bean's non-empty fields are conditions
|
||||
func (session *Session) Delete(bean interface{}) (int64, error) {
|
||||
if session.isAutoClose {
|
||||
@ -150,16 +87,10 @@ func (session *Session) Delete(bean interface{}) (int64, error) {
|
||||
}
|
||||
|
||||
var realSQL string
|
||||
argsForCache := make([]interface{}, 0, len(condArgs)*2)
|
||||
|
||||
if session.statement.unscoped || table.DeletedColumn() == nil { // tag "deleted" is disabled
|
||||
realSQL = deleteSQL
|
||||
copy(argsForCache, condArgs)
|
||||
argsForCache = append(condArgs, argsForCache...)
|
||||
} else {
|
||||
// !oinume! sqlStrForCache and argsForCache is needed to behave as executing "DELETE FROM ..." for cache.
|
||||
copy(argsForCache, condArgs)
|
||||
argsForCache = append(condArgs, argsForCache...)
|
||||
|
||||
deletedColumn := table.DeletedColumn()
|
||||
realSQL = fmt.Sprintf("UPDATE %v SET %v = ? WHERE %v",
|
||||
session.engine.Quote(session.statement.TableName()),
|
||||
@ -204,11 +135,6 @@ func (session *Session) Delete(bean interface{}) (int64, error) {
|
||||
setColumnTime(bean, col, t)
|
||||
})
|
||||
}
|
||||
|
||||
if cacher := session.engine.getCacher(tableNameNoQuote); cacher != nil && session.statement.UseCache {
|
||||
session.cacheDelete(table, tableNameNoQuote, deleteSQL, argsForCache...)
|
||||
}
|
||||
|
||||
session.statement.RefTable = table
|
||||
res, err := session.exec(realSQL, condArgs...)
|
||||
if err != nil {
|
||||
|
@ -6,7 +6,6 @@ package xorm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
@ -128,7 +127,7 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{})
|
||||
|
||||
var sqlStr string
|
||||
var args []interface{}
|
||||
var err error
|
||||
// var err error
|
||||
if session.statement.RawSQL == "" {
|
||||
if len(session.statement.TableName()) <= 0 {
|
||||
return ErrTableNotFound
|
||||
@ -181,18 +180,18 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{})
|
||||
args = session.statement.RawParams
|
||||
}
|
||||
|
||||
if session.canCache() {
|
||||
if cacher := session.engine.getCacher(session.statement.TableName()); cacher != nil &&
|
||||
!session.statement.IsDistinct &&
|
||||
!session.statement.unscoped {
|
||||
err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...)
|
||||
if err != ErrCacheFailed {
|
||||
return err
|
||||
}
|
||||
err = nil // !nashtsai! reset err to nil for ErrCacheFailed
|
||||
session.engine.logger.Warn("Cache Find Failed")
|
||||
}
|
||||
}
|
||||
// if session.canCache() {
|
||||
// if cacher := session.engine.getCacher(session.statement.TableName()); cacher != nil &&
|
||||
// !session.statement.IsDistinct &&
|
||||
// !session.statement.unscoped {
|
||||
// err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...)
|
||||
// if err != ErrCacheFailed {
|
||||
// return err
|
||||
// }
|
||||
// err = nil // !nashtsai! reset err to nil for ErrCacheFailed
|
||||
// session.engine.logger.Warn("Cache Find Failed")
|
||||
// }
|
||||
// }
|
||||
|
||||
return session.noCacheFind(table, sliceValue, sqlStr, args...)
|
||||
}
|
||||
@ -320,203 +319,205 @@ func convertPKToValue(table *core.Table, dst interface{}, pk core.PK) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) {
|
||||
if !session.canCache() ||
|
||||
indexNoCase(sqlStr, "having") != -1 ||
|
||||
indexNoCase(sqlStr, "group by") != -1 {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
// func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr interface{}, args ...interface{}) (err error) {
|
||||
// if !session.canCache() ||
|
||||
// indexNoCase(sqlStr, "having") != -1 ||
|
||||
// indexNoCase(sqlStr, "group by") != -1 {
|
||||
// return ErrCacheFailed
|
||||
// }
|
||||
|
||||
tableName := session.statement.TableName()
|
||||
cacher := session.engine.getCacher(tableName)
|
||||
if cacher == nil {
|
||||
return nil
|
||||
}
|
||||
// tableName := session.statement.TableName()
|
||||
// return nil
|
||||
// // cacher := session.engine.getCacher(tableName)
|
||||
// // if cacher == nil {
|
||||
// // return nil
|
||||
// // }
|
||||
|
||||
for _, filter := range session.engine.dialect.Filters() {
|
||||
sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable)
|
||||
}
|
||||
// for _, filter := range session.engine.dialect.Filters() {
|
||||
// sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable)
|
||||
// }
|
||||
|
||||
newsql := session.statement.convertIDSQL(sqlStr)
|
||||
if newsql == "" {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
// newsql := session.statement.convertIDSQL(sqlStr)
|
||||
// if newsql == "" {
|
||||
// return ErrCacheFailed
|
||||
// }
|
||||
|
||||
table := session.statement.RefTable
|
||||
ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
|
||||
if err != nil {
|
||||
rows, err := session.queryRows(newsql, args...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
// table := session.statement.RefTable
|
||||
// var cacher core.Cacher
|
||||
// ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
|
||||
// if err != nil {
|
||||
// rows, err := session.queryRows(newsql, args...)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// defer rows.Close()
|
||||
|
||||
var i int
|
||||
ids = make([]core.PK, 0)
|
||||
for rows.Next() {
|
||||
i++
|
||||
if i > 500 {
|
||||
session.engine.logger.Debug("[cacheFind] ids length > 500, no cache")
|
||||
return ErrCacheFailed
|
||||
}
|
||||
var res = make([]string, len(table.PrimaryKeys))
|
||||
err = rows.ScanSlice(&res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pk core.PK = make([]interface{}, len(table.PrimaryKeys))
|
||||
for i, col := range table.PKColumns() {
|
||||
pk[i], err = session.engine.idTypeAssertion(col, res[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// var i int
|
||||
// ids = make([]core.PK, 0)
|
||||
// for rows.Next() {
|
||||
// i++
|
||||
// if i > 500 {
|
||||
// session.engine.logger.Debug("[cacheFind] ids length > 500, no cache")
|
||||
// return ErrCacheFailed
|
||||
// }
|
||||
// var res = make([]string, len(table.PrimaryKeys))
|
||||
// err = rows.ScanSlice(&res)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// var pk core.PK = make([]interface{}, len(table.PrimaryKeys))
|
||||
// for i, col := range table.PKColumns() {
|
||||
// pk[i], err = session.engine.idTypeAssertion(col, res[i])
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
|
||||
ids = append(ids, pk)
|
||||
}
|
||||
if rows.Err() != nil {
|
||||
return rows.Err()
|
||||
}
|
||||
// ids = append(ids, pk)
|
||||
// }
|
||||
// if rows.Err() != nil {
|
||||
// return rows.Err()
|
||||
// }
|
||||
|
||||
session.engine.logger.Debug("[cacheFind] cache sql:", ids, tableName, sqlStr, newsql, args)
|
||||
err = core.PutCacheSql(cacher, ids, tableName, newsql, args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
session.engine.logger.Debug("[cacheFind] cache hit sql:", tableName, sqlStr, newsql, args)
|
||||
}
|
||||
// session.engine.logger.Debug("[cacheFind] cache sql:", ids, tableName, sqlStr, newsql, args)
|
||||
// err = core.PutCacheSql(cacher, ids, tableName, newsql, args)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// } else {
|
||||
// session.engine.logger.Debug("[cacheFind] cache hit sql:", tableName, sqlStr, newsql, args)
|
||||
// }
|
||||
|
||||
sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
|
||||
// sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
|
||||
|
||||
ididxes := make(map[string]int)
|
||||
var ides []core.PK
|
||||
var temps = make([]interface{}, len(ids))
|
||||
// ididxes := make(map[string]int)
|
||||
// var ides []core.PK
|
||||
// var temps = make([]interface{}, len(ids))
|
||||
|
||||
for idx, id := range ids {
|
||||
sid, err := id.ToString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bean := cacher.GetBean(tableName, sid)
|
||||
// for idx, id := range ids {
|
||||
// sid, err := id.ToString()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// bean := cacher.GetBean(tableName, sid)
|
||||
|
||||
// fix issue #894
|
||||
isHit := func() (ht bool) {
|
||||
if bean == nil {
|
||||
ht = false
|
||||
return
|
||||
}
|
||||
ckb := reflect.ValueOf(bean).Elem().Type()
|
||||
ht = ckb == t
|
||||
if !ht && t.Kind() == reflect.Ptr {
|
||||
ht = t.Elem() == ckb
|
||||
}
|
||||
return
|
||||
}
|
||||
if !isHit() {
|
||||
ides = append(ides, id)
|
||||
ididxes[sid] = idx
|
||||
} else {
|
||||
session.engine.logger.Debug("[cacheFind] cache hit bean:", tableName, id, bean)
|
||||
// // fix issue #894
|
||||
// isHit := func() (ht bool) {
|
||||
// if bean == nil {
|
||||
// ht = false
|
||||
// return
|
||||
// }
|
||||
// ckb := reflect.ValueOf(bean).Elem().Type()
|
||||
// ht = ckb == t
|
||||
// if !ht && t.Kind() == reflect.Ptr {
|
||||
// ht = t.Elem() == ckb
|
||||
// }
|
||||
// return
|
||||
// }
|
||||
// if !isHit() {
|
||||
// ides = append(ides, id)
|
||||
// ididxes[sid] = idx
|
||||
// } else {
|
||||
// session.engine.logger.Debug("[cacheFind] cache hit bean:", tableName, id, bean)
|
||||
|
||||
pk := session.engine.IdOf(bean)
|
||||
xid, err := pk.ToString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// pk := session.engine.IdOf(bean)
|
||||
// xid, err := pk.ToString()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
if sid != xid {
|
||||
session.engine.logger.Error("[cacheFind] error cache", xid, sid, bean)
|
||||
return ErrCacheFailed
|
||||
}
|
||||
temps[idx] = bean
|
||||
}
|
||||
}
|
||||
// if sid != xid {
|
||||
// session.engine.logger.Error("[cacheFind] error cache", xid, sid, bean)
|
||||
// return ErrCacheFailed
|
||||
// }
|
||||
// temps[idx] = bean
|
||||
// }
|
||||
// }
|
||||
|
||||
if len(ides) > 0 {
|
||||
slices := reflect.New(reflect.SliceOf(t))
|
||||
beans := slices.Interface()
|
||||
// if len(ides) > 0 {
|
||||
// slices := reflect.New(reflect.SliceOf(t))
|
||||
// beans := slices.Interface()
|
||||
|
||||
if len(table.PrimaryKeys) == 1 {
|
||||
ff := make([]interface{}, 0, len(ides))
|
||||
for _, ie := range ides {
|
||||
ff = append(ff, ie[0])
|
||||
}
|
||||
// if len(table.PrimaryKeys) == 1 {
|
||||
// ff := make([]interface{}, 0, len(ides))
|
||||
// for _, ie := range ides {
|
||||
// ff = append(ff, ie[0])
|
||||
// }
|
||||
|
||||
session.In("`"+table.PrimaryKeys[0]+"`", ff...)
|
||||
} else {
|
||||
for _, ie := range ides {
|
||||
cond := builder.NewCond()
|
||||
for i, name := range table.PrimaryKeys {
|
||||
cond = cond.And(builder.Eq{"`" + name + "`": ie[i]})
|
||||
}
|
||||
session.Or(cond)
|
||||
}
|
||||
}
|
||||
// session.In("`"+table.PrimaryKeys[0]+"`", ff...)
|
||||
// } else {
|
||||
// for _, ie := range ides {
|
||||
// cond := builder.NewCond()
|
||||
// for i, name := range table.PrimaryKeys {
|
||||
// cond = cond.And(builder.Eq{"`" + name + "`": ie[i]})
|
||||
// }
|
||||
// session.Or(cond)
|
||||
// }
|
||||
// }
|
||||
|
||||
err = session.NoCache().Table(tableName).find(beans)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// err = session.NoCache().Table(tableName).find(beans)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
vs := reflect.Indirect(reflect.ValueOf(beans))
|
||||
for i := 0; i < vs.Len(); i++ {
|
||||
rv := vs.Index(i)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
rv = rv.Addr()
|
||||
}
|
||||
id, err := session.engine.idOfV(rv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sid, err := id.ToString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// vs := reflect.Indirect(reflect.ValueOf(beans))
|
||||
// for i := 0; i < vs.Len(); i++ {
|
||||
// rv := vs.Index(i)
|
||||
// if rv.Kind() != reflect.Ptr {
|
||||
// rv = rv.Addr()
|
||||
// }
|
||||
// id, err := session.engine.idOfV(rv)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// sid, err := id.ToString()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
bean := rv.Interface()
|
||||
temps[ididxes[sid]] = bean
|
||||
session.engine.logger.Debug("[cacheFind] cache bean:", tableName, id, bean, temps)
|
||||
cacher.PutBean(tableName, sid, bean)
|
||||
}
|
||||
}
|
||||
// bean := rv.Interface()
|
||||
// temps[ididxes[sid]] = bean
|
||||
// session.engine.logger.Debug("[cacheFind] cache bean:", tableName, id, bean, temps)
|
||||
// cacher.PutBean(tableName, sid, bean)
|
||||
// }
|
||||
// }
|
||||
|
||||
for j := 0; j < len(temps); j++ {
|
||||
bean := temps[j]
|
||||
if bean == nil {
|
||||
session.engine.logger.Warn("[cacheFind] cache no hit:", tableName, ids[j], temps)
|
||||
// return errors.New("cache error") // !nashtsai! no need to return error, but continue instead
|
||||
continue
|
||||
}
|
||||
if sliceValue.Kind() == reflect.Slice {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
sliceValue.Set(reflect.Append(sliceValue, reflect.ValueOf(bean)))
|
||||
} else {
|
||||
sliceValue.Set(reflect.Append(sliceValue, reflect.Indirect(reflect.ValueOf(bean))))
|
||||
}
|
||||
} else if sliceValue.Kind() == reflect.Map {
|
||||
var key = ids[j]
|
||||
keyType := sliceValue.Type().Key()
|
||||
var ikey interface{}
|
||||
if len(key) == 1 {
|
||||
ikey, err = str2PK(fmt.Sprintf("%v", key[0]), keyType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if keyType.Kind() != reflect.Slice {
|
||||
return errors.New("table have multiple primary keys, key is not core.PK or slice")
|
||||
}
|
||||
ikey = key
|
||||
}
|
||||
// for j := 0; j < len(temps); j++ {
|
||||
// bean := temps[j]
|
||||
// if bean == nil {
|
||||
// session.engine.logger.Warn("[cacheFind] cache no hit:", tableName, ids[j], temps)
|
||||
// // return errors.New("cache error") // !nashtsai! no need to return error, but continue instead
|
||||
// continue
|
||||
// }
|
||||
// if sliceValue.Kind() == reflect.Slice {
|
||||
// if t.Kind() == reflect.Ptr {
|
||||
// sliceValue.Set(reflect.Append(sliceValue, reflect.ValueOf(bean)))
|
||||
// } else {
|
||||
// sliceValue.Set(reflect.Append(sliceValue, reflect.Indirect(reflect.ValueOf(bean))))
|
||||
// }
|
||||
// } else if sliceValue.Kind() == reflect.Map {
|
||||
// var key = ids[j]
|
||||
// keyType := sliceValue.Type().Key()
|
||||
// var ikey interface{}
|
||||
// if len(key) == 1 {
|
||||
// ikey, err = str2PK(fmt.Sprintf("%v", key[0]), keyType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// } else {
|
||||
// if keyType.Kind() != reflect.Slice {
|
||||
// return errors.New("table have multiple primary keys, key is not core.PK or slice")
|
||||
// }
|
||||
// ikey = key
|
||||
// }
|
||||
|
||||
if t.Kind() == reflect.Ptr {
|
||||
sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.ValueOf(bean))
|
||||
} else {
|
||||
sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.Indirect(reflect.ValueOf(bean)))
|
||||
}
|
||||
}
|
||||
}
|
||||
// if t.Kind() == reflect.Ptr {
|
||||
// sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.ValueOf(bean))
|
||||
// } else {
|
||||
// sliceValue.SetMapIndex(reflect.ValueOf(ikey), reflect.Indirect(reflect.ValueOf(bean)))
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
// return nil
|
||||
// }
|
||||
|
@ -7,9 +7,7 @@ package xorm
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"xorm.io/core"
|
||||
)
|
||||
@ -63,39 +61,11 @@ func (session *Session) get(bean interface{}) (bool, error) {
|
||||
|
||||
table := session.statement.RefTable
|
||||
|
||||
if session.canCache() && beanValue.Elem().Kind() == reflect.Struct {
|
||||
if cacher := session.engine.getCacher(session.statement.TableName()); cacher != nil &&
|
||||
!session.statement.unscoped {
|
||||
has, err := session.cacheGet(bean, sqlStr, args...)
|
||||
if err != ErrCacheFailed {
|
||||
return has, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
context := session.statement.context
|
||||
if context != nil {
|
||||
res := context.Get(fmt.Sprintf("%v-%v", sqlStr, args))
|
||||
if res != nil {
|
||||
session.engine.logger.Debug("hit context cache", sqlStr)
|
||||
|
||||
structValue := reflect.Indirect(reflect.ValueOf(bean))
|
||||
structValue.Set(reflect.Indirect(reflect.ValueOf(res)))
|
||||
session.lastSQL = ""
|
||||
session.lastSQLArgs = nil
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
has, err := session.nocacheGet(beanValue.Elem().Kind(), table, bean, sqlStr, args...)
|
||||
if err != nil || !has {
|
||||
return has, err
|
||||
}
|
||||
|
||||
if context != nil {
|
||||
context.Put(fmt.Sprintf("%v-%v", sqlStr, args), bean)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@ -113,7 +83,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return false, nil
|
||||
}
|
||||
|
||||
switch bean.(type) {
|
||||
switch bean := bean.(type) {
|
||||
case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString:
|
||||
return true, rows.Scan(&bean)
|
||||
case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString:
|
||||
@ -124,7 +94,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*string)) = res.String
|
||||
*bean = res.String
|
||||
}
|
||||
return true, nil
|
||||
case *int:
|
||||
@ -133,7 +103,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*int)) = int(res.Int64)
|
||||
*bean = int(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *int8:
|
||||
@ -142,7 +112,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*int8)) = int8(res.Int64)
|
||||
*bean = int8(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *int16:
|
||||
@ -151,7 +121,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*int16)) = int16(res.Int64)
|
||||
*bean = int16(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *int32:
|
||||
@ -160,7 +130,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*int32)) = int32(res.Int64)
|
||||
*bean = int32(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *int64:
|
||||
@ -169,7 +139,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*int64)) = int64(res.Int64)
|
||||
*bean = int64(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *uint:
|
||||
@ -178,7 +148,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*uint)) = uint(res.Int64)
|
||||
*bean = uint(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *uint8:
|
||||
@ -187,7 +157,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*uint8)) = uint8(res.Int64)
|
||||
*bean = uint8(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *uint16:
|
||||
@ -196,7 +166,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*uint16)) = uint16(res.Int64)
|
||||
*bean = uint16(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *uint32:
|
||||
@ -205,7 +175,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*uint32)) = uint32(res.Int64)
|
||||
*(bean) = uint32(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *uint64:
|
||||
@ -214,7 +184,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*uint64)) = uint64(res.Int64)
|
||||
*bean = uint64(res.Int64)
|
||||
}
|
||||
return true, nil
|
||||
case *bool:
|
||||
@ -223,7 +193,7 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
return true, err
|
||||
}
|
||||
if res.Valid {
|
||||
*(bean.(*bool)) = res.Bool
|
||||
*bean = res.Bool
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@ -263,94 +233,3 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
|
||||
|
||||
return true, err
|
||||
}
|
||||
|
||||
func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interface{}) (has bool, err error) {
|
||||
// if has no reftable, then don't use cache currently
|
||||
if !session.canCache() {
|
||||
return false, ErrCacheFailed
|
||||
}
|
||||
|
||||
for _, filter := range session.engine.dialect.Filters() {
|
||||
sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable)
|
||||
}
|
||||
newsql := session.statement.convertIDSQL(sqlStr)
|
||||
if newsql == "" {
|
||||
return false, ErrCacheFailed
|
||||
}
|
||||
|
||||
tableName := session.statement.TableName()
|
||||
cacher := session.engine.getCacher(tableName)
|
||||
|
||||
session.engine.logger.Debug("[cacheGet] find sql:", newsql, args)
|
||||
table := session.statement.RefTable
|
||||
ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
|
||||
if err != nil {
|
||||
var res = make([]string, len(table.PrimaryKeys))
|
||||
rows, err := session.NoCache().queryRows(newsql, args...)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
if rows.Next() {
|
||||
err = rows.ScanSlice(&res)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
return false, ErrCacheFailed
|
||||
}
|
||||
|
||||
var pk core.PK = make([]interface{}, len(table.PrimaryKeys))
|
||||
for i, col := range table.PKColumns() {
|
||||
if col.SQLType.IsText() {
|
||||
pk[i] = res[i]
|
||||
} else if col.SQLType.IsNumeric() {
|
||||
n, err := strconv.ParseInt(res[i], 10, 64)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
pk[i] = n
|
||||
} else {
|
||||
return false, errors.New("unsupported")
|
||||
}
|
||||
}
|
||||
|
||||
ids = []core.PK{pk}
|
||||
session.engine.logger.Debug("[cacheGet] cache ids:", newsql, ids)
|
||||
err = core.PutCacheSql(cacher, ids, tableName, newsql, args)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
session.engine.logger.Debug("[cacheGet] cache hit sql:", newsql, ids)
|
||||
}
|
||||
|
||||
if len(ids) > 0 {
|
||||
structValue := reflect.Indirect(reflect.ValueOf(bean))
|
||||
id := ids[0]
|
||||
session.engine.logger.Debug("[cacheGet] get bean:", tableName, id)
|
||||
sid, err := id.ToString()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
cacheBean := cacher.GetBean(tableName, sid)
|
||||
if cacheBean == nil {
|
||||
cacheBean = bean
|
||||
has, err = session.nocacheGet(reflect.Struct, table, cacheBean, sqlStr, args...)
|
||||
if err != nil || !has {
|
||||
return has, err
|
||||
}
|
||||
|
||||
session.engine.logger.Debug("[cacheGet] cache bean:", tableName, id, cacheBean)
|
||||
cacher.PutBean(tableName, sid, cacheBean)
|
||||
} else {
|
||||
session.engine.logger.Debug("[cacheGet] cache hit bean:", tableName, id, cacheBean)
|
||||
has = true
|
||||
}
|
||||
structValue.Set(reflect.Indirect(reflect.ValueOf(cacheBean)))
|
||||
|
||||
return has, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
@ -17,7 +17,7 @@ import (
|
||||
)
|
||||
|
||||
// ErrNoElementsOnSlice represents an error there is no element when insert
|
||||
var ErrNoElementsOnSlice = errors.New("No element on slice when insert")
|
||||
var ErrNoElementsOnSlice = errors.New("no element on slice when insert")
|
||||
|
||||
// Insert insert one or more beans
|
||||
func (session *Session) Insert(beans ...interface{}) (int64, error) {
|
||||
@ -35,32 +35,30 @@ func (session *Session) Insert(beans ...interface{}) (int64, error) {
|
||||
}()
|
||||
|
||||
for _, bean := range beans {
|
||||
switch bean.(type) {
|
||||
switch bean := bean.(type) {
|
||||
case map[string]interface{}:
|
||||
cnt, err := session.insertMapInterface(bean.(map[string]interface{}))
|
||||
cnt, err := session.insertMapInterface(bean)
|
||||
if err != nil {
|
||||
return affected, err
|
||||
}
|
||||
affected += cnt
|
||||
case []map[string]interface{}:
|
||||
s := bean.([]map[string]interface{})
|
||||
for i := 0; i < len(s); i++ {
|
||||
cnt, err := session.insertMapInterface(s[i])
|
||||
for i := 0; i < len(bean); i++ {
|
||||
cnt, err := session.insertMapInterface(bean[i])
|
||||
if err != nil {
|
||||
return affected, err
|
||||
}
|
||||
affected += cnt
|
||||
}
|
||||
case map[string]string:
|
||||
cnt, err := session.insertMapString(bean.(map[string]string))
|
||||
cnt, err := session.insertMapString(bean)
|
||||
if err != nil {
|
||||
return affected, err
|
||||
}
|
||||
affected += cnt
|
||||
case []map[string]string:
|
||||
s := bean.([]map[string]string)
|
||||
for i := 0; i < len(s); i++ {
|
||||
cnt, err := session.insertMapString(s[i])
|
||||
for i := 0; i < len(bean); i++ {
|
||||
cnt, err := session.insertMapString(bean[i])
|
||||
if err != nil {
|
||||
return affected, err
|
||||
}
|
||||
@ -144,7 +142,6 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error
|
||||
if processor, ok := interface{}(elemValue).(BeforeInsertProcessor); ok {
|
||||
processor.BeforeInsert()
|
||||
}
|
||||
// --
|
||||
|
||||
if i == 0 {
|
||||
for _, col := range table.Columns() {
|
||||
@ -270,8 +267,6 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error
|
||||
return 0, err
|
||||
}
|
||||
|
||||
session.cacheInsert(tableName)
|
||||
|
||||
lenAfterClosures := len(session.afterClosures)
|
||||
for i := 0; i < size; i++ {
|
||||
elemValue := reflect.Indirect(sliceValue.Index(i)).Addr().Interface()
|
||||
@ -477,8 +472,6 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
|
||||
|
||||
defer handleAfterInsertProcessorFunc(bean)
|
||||
|
||||
session.cacheInsert(tableName)
|
||||
|
||||
if table.Version != "" && session.statement.checkVersion {
|
||||
verValue, err := table.VersionColumn().ValueOf(bean)
|
||||
if err != nil {
|
||||
@ -518,8 +511,6 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
|
||||
}
|
||||
defer handleAfterInsertProcessorFunc(bean)
|
||||
|
||||
session.cacheInsert(tableName)
|
||||
|
||||
if table.Version != "" && session.statement.checkVersion {
|
||||
verValue, err := table.VersionColumn().ValueOf(bean)
|
||||
if err != nil {
|
||||
@ -559,8 +550,6 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
|
||||
|
||||
defer handleAfterInsertProcessorFunc(bean)
|
||||
|
||||
session.cacheInsert(tableName)
|
||||
|
||||
if table.Version != "" && session.statement.checkVersion {
|
||||
verValue, err := table.VersionColumn().ValueOf(bean)
|
||||
if err != nil {
|
||||
@ -606,19 +595,6 @@ func (session *Session) InsertOne(bean interface{}) (int64, error) {
|
||||
return session.innerInsert(bean)
|
||||
}
|
||||
|
||||
func (session *Session) cacheInsert(table string) error {
|
||||
if !session.statement.UseCache {
|
||||
return nil
|
||||
}
|
||||
cacher := session.engine.getCacher(table)
|
||||
if cacher == nil {
|
||||
return nil
|
||||
}
|
||||
session.engine.logger.Debug("[cache] clear sql:", table)
|
||||
cacher.ClearIds(table)
|
||||
return nil
|
||||
}
|
||||
|
||||
// genInsertColumns generates insert needed columns
|
||||
func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) {
|
||||
table := session.statement.RefTable
|
||||
@ -839,10 +815,6 @@ func (session *Session) insertMap(columns []string, args []interface{}) (int64,
|
||||
sql := w.String()
|
||||
args = w.Args()
|
||||
|
||||
if err := session.cacheInsert(tableName); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
res, err := session.exec(sql, args...)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
@ -8,138 +8,12 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"xorm.io/builder"
|
||||
"xorm.io/core"
|
||||
)
|
||||
|
||||
func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, args ...interface{}) error {
|
||||
if table == nil ||
|
||||
session.tx != nil {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
|
||||
oldhead, newsql := session.statement.convertUpdateSQL(sqlStr)
|
||||
if newsql == "" {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
for _, filter := range session.engine.dialect.Filters() {
|
||||
newsql = filter.Do(newsql, session.engine.dialect, table)
|
||||
}
|
||||
session.engine.logger.Debug("[cacheUpdate] new sql", oldhead, newsql)
|
||||
|
||||
var nStart int
|
||||
if len(args) > 0 {
|
||||
if strings.Index(sqlStr, "?") > -1 {
|
||||
nStart = strings.Count(oldhead, "?")
|
||||
} else {
|
||||
// only for pq, TODO: if any other databse?
|
||||
nStart = strings.Count(oldhead, "$")
|
||||
}
|
||||
}
|
||||
|
||||
cacher := session.engine.getCacher(tableName)
|
||||
session.engine.logger.Debug("[cacheUpdate] get cache sql", newsql, args[nStart:])
|
||||
ids, err := core.GetCacheSql(cacher, tableName, newsql, args[nStart:])
|
||||
if err != nil {
|
||||
rows, err := session.NoCache().queryRows(newsql, args[nStart:]...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
ids = make([]core.PK, 0)
|
||||
for rows.Next() {
|
||||
var res = make([]string, len(table.PrimaryKeys))
|
||||
err = rows.ScanSlice(&res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pk core.PK = make([]interface{}, len(table.PrimaryKeys))
|
||||
for i, col := range table.PKColumns() {
|
||||
if col.SQLType.IsNumeric() {
|
||||
n, err := strconv.ParseInt(res[i], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pk[i] = n
|
||||
} else if col.SQLType.IsText() {
|
||||
pk[i] = res[i]
|
||||
} else {
|
||||
return errors.New("not supported")
|
||||
}
|
||||
}
|
||||
|
||||
ids = append(ids, pk)
|
||||
}
|
||||
if rows.Err() != nil {
|
||||
return rows.Err()
|
||||
}
|
||||
session.engine.logger.Debug("[cacheUpdate] find updated id", ids)
|
||||
} /*else {
|
||||
session.engine.LogDebug("[xorm:cacheUpdate] del cached sql:", tableName, newsql, args)
|
||||
cacher.DelIds(tableName, genSqlKey(newsql, args))
|
||||
}*/
|
||||
|
||||
for _, id := range ids {
|
||||
sid, err := id.ToString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bean := cacher.GetBean(tableName, sid); bean != nil {
|
||||
sqls := splitNNoCase(sqlStr, "where", 2)
|
||||
if len(sqls) == 0 || len(sqls) > 2 {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
|
||||
sqls = splitNNoCase(sqls[0], "set", 2)
|
||||
if len(sqls) != 2 {
|
||||
return ErrCacheFailed
|
||||
}
|
||||
kvs := strings.Split(strings.TrimSpace(sqls[1]), ",")
|
||||
|
||||
for idx, kv := range kvs {
|
||||
sps := strings.SplitN(kv, "=", 2)
|
||||
sps2 := strings.Split(sps[0], ".")
|
||||
colName := sps2[len(sps2)-1]
|
||||
// treat quote prefix, suffix and '`' as quotes
|
||||
quotes := append(strings.Split(session.engine.Quote(""), ""), "`")
|
||||
if strings.ContainsAny(colName, strings.Join(quotes, "")) {
|
||||
colName = strings.TrimSpace(eraseAny(colName, quotes...))
|
||||
} else {
|
||||
session.engine.logger.Debug("[cacheUpdate] cannot find column", tableName, colName)
|
||||
return ErrCacheFailed
|
||||
}
|
||||
|
||||
if col := table.GetColumn(colName); col != nil {
|
||||
fieldValue, err := col.ValueOf(bean)
|
||||
if err != nil {
|
||||
session.engine.logger.Error(err)
|
||||
} else {
|
||||
session.engine.logger.Debug("[cacheUpdate] set bean field", bean, colName, fieldValue.Interface())
|
||||
if col.IsVersion && session.statement.checkVersion {
|
||||
session.incrVersionFieldValue(fieldValue)
|
||||
} else {
|
||||
fieldValue.Set(reflect.ValueOf(args[idx]))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
session.engine.logger.Errorf("[cacheUpdate] ERROR: column %v is not table %v's",
|
||||
colName, table.Name)
|
||||
}
|
||||
}
|
||||
|
||||
session.engine.logger.Debug("[cacheUpdate] update cache", tableName, id, bean)
|
||||
cacher.PutBean(tableName, sid, bean)
|
||||
}
|
||||
}
|
||||
session.engine.logger.Debug("[cacheUpdate] clear cached table sql:", tableName)
|
||||
cacher.ClearIds(tableName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Update records, bean's non-empty fields are updated contents,
|
||||
// condiBean' non-empty filds are conditions
|
||||
// CAUTION:
|
||||
@ -389,7 +263,7 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
|
||||
}
|
||||
|
||||
if len(colNames) <= 0 {
|
||||
return 0, errors.New("No content found to be updated")
|
||||
return 0, errors.New("no content found to be updated")
|
||||
}
|
||||
|
||||
var tableAlias = session.engine.Quote(tableName)
|
||||
@ -420,13 +294,6 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6
|
||||
}
|
||||
}
|
||||
|
||||
if cacher := session.engine.getCacher(tableName); cacher != nil && session.statement.UseCache {
|
||||
// session.cacheUpdate(table, tableName, sqlStr, args...)
|
||||
session.engine.logger.Debug("[cacheUpdate] clear table ", tableName)
|
||||
cacher.ClearIds(tableName)
|
||||
cacher.ClearBeans(tableName)
|
||||
}
|
||||
|
||||
// handle after update processors
|
||||
if session.isAutoCommit {
|
||||
for _, closure := range session.afterClosures {
|
||||
|
@ -57,7 +57,6 @@ type Statement struct {
|
||||
exprColumns exprParams
|
||||
cond builder.Cond
|
||||
bufferSize int
|
||||
context ContextCache
|
||||
lastError error
|
||||
}
|
||||
|
||||
@ -99,7 +98,6 @@ func (statement *Statement) Init() {
|
||||
statement.exprColumns = exprParams{}
|
||||
statement.cond = builder.NewCond()
|
||||
statement.bufferSize = 0
|
||||
statement.context = nil
|
||||
statement.lastError = nil
|
||||
}
|
||||
|
||||
|
@ -25,8 +25,6 @@ type tagContext struct {
|
||||
isUnique bool
|
||||
indexNames map[string]int
|
||||
engine *Engine
|
||||
hasCacheTag bool
|
||||
hasNoCacheTag bool
|
||||
ignoreNext bool
|
||||
}
|
||||
|
||||
@ -52,8 +50,6 @@ var (
|
||||
"NOTNULL": NotNullTagHandler,
|
||||
"INDEX": IndexTagHandler,
|
||||
"UNIQUE": UniqueTagHandler,
|
||||
"CACHE": CacheTagHandler,
|
||||
"NOCACHE": NoCacheTagHandler,
|
||||
"COMMENT": CommentTagHandler,
|
||||
}
|
||||
)
|
||||
@ -103,17 +99,6 @@ func NotNullTagHandler(ctx *tagContext) error {
|
||||
// AutoIncrTagHandler describes autoincr tag handler
|
||||
func AutoIncrTagHandler(ctx *tagContext) error {
|
||||
ctx.col.IsAutoIncrement = true
|
||||
/*
|
||||
if len(ctx.params) > 0 {
|
||||
autoStartInt, err := strconv.Atoi(ctx.params[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ctx.col.AutoIncrStart = autoStartInt
|
||||
} else {
|
||||
ctx.col.AutoIncrStart = 1
|
||||
}
|
||||
*/
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -293,19 +278,3 @@ func ExtendsTagHandler(ctx *tagContext) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CacheTagHandler describes cache tag handler
|
||||
func CacheTagHandler(ctx *tagContext) error {
|
||||
if !ctx.hasCacheTag {
|
||||
ctx.hasCacheTag = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NoCacheTagHandler describes nocache tag handler
|
||||
func NoCacheTagHandler(ctx *tagContext) error {
|
||||
if !ctx.hasNoCacheTag {
|
||||
ctx.hasNoCacheTag = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -92,7 +92,6 @@ func NewEngine(driverName string, dataSourceName string) (*Engine, error) {
|
||||
TagIdentifier: "xorm",
|
||||
TZLocation: time.Local,
|
||||
tagHandlers: defaultTagHandlers,
|
||||
cachers: make(map[string]core.Cacher),
|
||||
defaultContext: context.Background(),
|
||||
}
|
||||
|
||||
@ -118,8 +117,3 @@ func NewEngineWithParams(driverName string, dataSourceName string, params map[st
|
||||
engine.dialect.SetParams(params)
|
||||
return engine, err
|
||||
}
|
||||
|
||||
// Clone clone an engine
|
||||
func (engine *Engine) Clone() (*Engine, error) {
|
||||
return NewEngine(engine.DriverName(), engine.DataSourceName())
|
||||
}
|
||||
|