// Code generated by $GOPATH/src/go-common/app/tool/cache/gen. DO NOT EDIT.

/*
Package dao is a generated cache proxy package.
It is generated from:

type _cache interface {
	Info(c context.Context, key int64) (*v1.Info, error)
	//cache: -batch=50 -max_group=10 -batch_err=continue
	Infos(c context.Context, keys []int64) (map[int64]*v1.Info, error)
	Card(c context.Context, key int64) (*v1.Card, error)
	//cache: -batch=50 -max_group=10 -batch_err=continue
	Cards(c context.Context, keys []int64) (map[int64]*v1.Card, error)
	Vip(c context.Context, key int64) (*v1.VipInfo, error)
	//cache: -batch=50 -max_group=10 -batch_err=continue
	Vips(c context.Context, keys []int64) (map[int64]*v1.VipInfo, error)
	Profile(c context.Context, key int64) (*v1.Profile, error)
}
*/

package dao

import (
	"context"
	"sync"

	v1 "go-common/app/service/main/account/api"
	"go-common/library/stat/prom"
	"go-common/library/sync/errgroup"
)

var _ _cache
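
// The proxies below delegate to hand-written methods that must exist elsewhere
// in package dao. Inferred from the calls in this file (a sketch, not the
// authoritative definitions; the AddCache* results are ignored here, so their
// return types are an assumption), each _cache method X is backed by:
//
//	CacheX    - read from the cache layer, e.g. CacheInfo(c, id) (*v1.Info, error)
//	RawX      - read from the backing source (RPC/DB), e.g. RawInfo(c, id) (*v1.Info, error)
//	AddCacheX - write the fetched value back, called via d.cache.Do, e.g. AddCacheInfo(ctx, id, info)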
// Info get data from cache if miss will call source method, then add to cache.
func (d *Dao) Info(c context.Context, id int64) (res *v1.Info, err error) {
	addCache := true
	res, err = d.CacheInfo(c, id)
	if err != nil {
		addCache = false
		err = nil
	}
	if res != nil {
		prom.CacheHit.Incr("Info")
		return
	}
	prom.CacheMiss.Incr("Info")
	res, err = d.RawInfo(c, id)
	if err != nil {
		return
	}
	miss := res
	if !addCache {
		return
	}
	d.cache.Do(c, func(ctx context.Context) {
		d.AddCacheInfo(ctx, id, miss)
	})
	return
}
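
// Degradation note (applies to all single-key proxies in this file): when the
// cache read itself fails, the error is swallowed, the value is fetched from
// the source as on a normal miss, and the write-back is skipped (addCache is
// set to false), so a broken cache degrades to source-only reads instead of
// failing the request.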
// Infos get data from cache if miss will call source method, then add to cache.
func (d *Dao) Infos(c context.Context, keys []int64) (res map[int64]*v1.Info, err error) {
	if len(keys) == 0 {
		return
	}
	addCache := true
	res, err = d.CacheInfos(c, keys)
	if err != nil {
		addCache = false
		res = nil
		err = nil
	}
	var miss []int64
	for _, key := range keys {
		if (res == nil) || (res[key] == nil) {
			miss = append(miss, key)
		}
	}
	prom.CacheHit.Add("Infos", int64(len(keys)-len(miss)))
	if len(miss) == 0 {
		return
	}
	var missData map[int64]*v1.Info
	missLen := len(miss)
	prom.CacheMiss.Add("Infos", int64(missLen))
	mutex := sync.Mutex{}
	for i := 0; i < missLen; i += 50 * 10 {
		var subKeys []int64
		group := &errgroup.Group{}
		ctx := c
		if (i + 50*10) > missLen {
			subKeys = miss[i:]
		} else {
			subKeys = miss[i : i+50*10]
		}
		missSubLen := len(subKeys)
		for j := 0; j < missSubLen; j += 50 {
			var ks []int64
			if (j + 50) > missSubLen {
				ks = subKeys[j:]
			} else {
				ks = subKeys[j : j+50]
			}
			group.Go(func() (err error) {
				data, err := d.RawInfos(ctx, ks)
				mutex.Lock()
				for k, v := range data {
					if missData == nil {
						missData = make(map[int64]*v1.Info, len(keys))
					}
					missData[k] = v
				}
				mutex.Unlock()
				return
			})
		}
		err1 := group.Wait()
		if err1 != nil {
			err = err1
		}
	}
	if res == nil {
		res = make(map[int64]*v1.Info)
	}
	for k, v := range missData {
		res[k] = v
	}
	if err != nil {
		return
	}
	if !addCache {
		return
	}
	d.cache.Do(c, func(ctx context.Context) {
		d.AddCacheInfos(ctx, missData)
	})
	return
}
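
// Batch layout for Infos, as implemented above (-batch=50 -max_group=10
// -batch_err=continue): missed keys are scanned in windows of 50*10 = 500;
// each window is cut into chunks of up to 50 keys and every chunk is fetched
// by its own errgroup goroutine, so at most 10 RawInfos calls run
// concurrently. With -batch_err=continue a failed chunk does not cancel the
// others: whatever was fetched is merged into res, the error is still
// returned, and the write-back to cache is skipped. Worked example: 230
// missed keys -> one window -> 5 concurrent RawInfos calls with 50, 50, 50,
// 50 and 30 keys.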
// Card get data from cache if miss will call source method, then add to cache.
func (d *Dao) Card(c context.Context, id int64) (res *v1.Card, err error) {
	addCache := true
	res, err = d.CacheCard(c, id)
	if err != nil {
		addCache = false
		err = nil
	}
	if res != nil {
		prom.CacheHit.Incr("Card")
		return
	}
	prom.CacheMiss.Incr("Card")
	res, err = d.RawCard(c, id)
	if err != nil {
		return
	}
	miss := res
	if !addCache {
		return
	}
	d.cache.Do(c, func(ctx context.Context) {
		d.AddCacheCard(ctx, id, miss)
	})
	return
}

// Cards get data from cache if miss will call source method, then add to cache.
func (d *Dao) Cards(c context.Context, keys []int64) (res map[int64]*v1.Card, err error) {
	if len(keys) == 0 {
		return
	}
	addCache := true
	res, err = d.CacheCards(c, keys)
	if err != nil {
		addCache = false
		res = nil
		err = nil
	}
	var miss []int64
	for _, key := range keys {
		if (res == nil) || (res[key] == nil) {
			miss = append(miss, key)
		}
	}
	prom.CacheHit.Add("Cards", int64(len(keys)-len(miss)))
	if len(miss) == 0 {
		return
	}
	var missData map[int64]*v1.Card
	missLen := len(miss)
	prom.CacheMiss.Add("Cards", int64(missLen))
	mutex := sync.Mutex{}
	for i := 0; i < missLen; i += 50 * 10 {
		var subKeys []int64
		group := &errgroup.Group{}
		ctx := c
		if (i + 50*10) > missLen {
			subKeys = miss[i:]
		} else {
			subKeys = miss[i : i+50*10]
		}
		missSubLen := len(subKeys)
		for j := 0; j < missSubLen; j += 50 {
			var ks []int64
			if (j + 50) > missSubLen {
				ks = subKeys[j:]
			} else {
				ks = subKeys[j : j+50]
			}
			group.Go(func() (err error) {
				data, err := d.RawCards(ctx, ks)
				mutex.Lock()
				for k, v := range data {
					if missData == nil {
						missData = make(map[int64]*v1.Card, len(keys))
					}
					missData[k] = v
				}
				mutex.Unlock()
				return
			})
		}
		err1 := group.Wait()
		if err1 != nil {
			err = err1
		}
	}
	if res == nil {
		res = make(map[int64]*v1.Card)
	}
	for k, v := range missData {
		res[k] = v
	}
	if err != nil {
		return
	}
	if !addCache {
		return
	}
	d.cache.Do(c, func(ctx context.Context) {
		d.AddCacheCards(ctx, missData)
	})
	return
}

// Vip get data from cache if miss will call source method, then add to cache.
func (d *Dao) Vip(c context.Context, id int64) (res *v1.VipInfo, err error) {
	addCache := true
	res, err = d.CacheVip(c, id)
	if err != nil {
		addCache = false
		err = nil
	}
	if res != nil {
		prom.CacheHit.Incr("Vip")
		return
	}
	prom.CacheMiss.Incr("Vip")
	res, err = d.RawVip(c, id)
	if err != nil {
		return
	}
	miss := res
	if !addCache {
		return
	}
	d.cache.Do(c, func(ctx context.Context) {
		d.AddCacheVip(ctx, id, miss)
	})
	return
}

// Vips get data from cache if miss will call source method, then add to cache.
func (d *Dao) Vips(c context.Context, keys []int64) (res map[int64]*v1.VipInfo, err error) {
	if len(keys) == 0 {
		return
	}
	addCache := true
	res, err = d.CacheVips(c, keys)
	if err != nil {
		addCache = false
		res = nil
		err = nil
	}
	var miss []int64
	for _, key := range keys {
		if (res == nil) || (res[key] == nil) {
			miss = append(miss, key)
		}
	}
	prom.CacheHit.Add("Vips", int64(len(keys)-len(miss)))
	if len(miss) == 0 {
		return
	}
	var missData map[int64]*v1.VipInfo
	missLen := len(miss)
	prom.CacheMiss.Add("Vips", int64(missLen))
	mutex := sync.Mutex{}
	for i := 0; i < missLen; i += 50 * 10 {
		var subKeys []int64
		group := &errgroup.Group{}
		ctx := c
		if (i + 50*10) > missLen {
			subKeys = miss[i:]
		} else {
			subKeys = miss[i : i+50*10]
		}
		missSubLen := len(subKeys)
		for j := 0; j < missSubLen; j += 50 {
			var ks []int64
			if (j + 50) > missSubLen {
				ks = subKeys[j:]
			} else {
				ks = subKeys[j : j+50]
			}
			group.Go(func() (err error) {
				data, err := d.RawVips(ctx, ks)
				mutex.Lock()
				for k, v := range data {
					if missData == nil {
						missData = make(map[int64]*v1.VipInfo, len(keys))
					}
					missData[k] = v
				}
				mutex.Unlock()
				return
			})
		}
		err1 := group.Wait()
		if err1 != nil {
			err = err1
		}
	}
	if res == nil {
		res = make(map[int64]*v1.VipInfo)
	}
	for k, v := range missData {
		res[k] = v
	}
	if err != nil {
		return
	}
	if !addCache {
		return
	}
	d.cache.Do(c, func(ctx context.Context) {
		d.AddCacheVips(ctx, missData)
	})
	return
}

// Profile get data from cache if miss will call source method, then add to cache.
func (d *Dao) Profile(c context.Context, id int64) (res *v1.Profile, err error) {
	addCache := true
	res, err = d.CacheProfile(c, id)
	if err != nil {
		addCache = false
		err = nil
	}
	if res != nil {
		prom.CacheHit.Incr("Profile")
		return
	}
	prom.CacheMiss.Incr("Profile")
	res, err = d.RawProfile(c, id)
	if err != nil {
		return
	}
	miss := res
	if !addCache {
		return
	}
	d.cache.Do(c, func(ctx context.Context) {
		d.AddCacheProfile(ctx, id, miss)
	})
	return
}
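
// Illustrative call-site sketch for the batch proxy (not part of the generated
// code; ctx and mids are placeholders assumed to come from the caller):
//
//	infos, err := d.Infos(ctx, mids)
//	if err != nil {
//		// With -batch_err=continue, infos may still contain the entries that
//		// could be fetched; the caller decides whether partial data is usable.
//	}
//	for _, mid := range mids {
//		if info, ok := infos[mid]; ok {
//			_ = info // present: served from cache or fetched from source
//		} // absent keys simply have no entry in the map
//	}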