Commit d9e85fa

feat: add KeepTTL constant for record expiration management
Introduce KeepTTL constant to allow records to retain their current expiration time when updated. This enhances the flexibility of the Set method in managing cache records.
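For illustration, a minimal usage sketch of the new constant (the New constructor below is an assumption made for the example; only Set and KeepTTL come from this commit):

	// Sketch only: New() and its signature are assumed, not part of this commit.
	cache := New()

	// Store a record with a five second TTL.
	_ = cache.Set("user:42", []byte("payload"), 5*time.Second)

	// Update the same key later; passing KeepTTL preserves the record's remaining
	// expiration time instead of resetting it.
	_ = cache.Set("user:42", []byte("new payload"), KeepTTL)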
1 parent 847b1dc commit d9e85fa

1 file changed

cache.go

Lines changed: 71 additions & 11 deletions
@@ -24,6 +24,10 @@ const (
 	LGSH
 )
 
+// KeepTTL is used to keep the record's current expiration time when it is updated.
+// It means the record will be stored again with the same expiration time it already has.
+const KeepTTL = time.Duration(-1)
+
 // AtomicCache structure represents whole cache memory.
 type AtomicCache struct {
 	// RWMutex is used for access to shards array.
@@ -154,50 +158,106 @@ func initShardsSection(shardsSection *ShardsLookup, maxShards, maxRecords, recor
 // space for data. If there is no empty space, new shard is allocated. Otherwise
 // some valid record (FIFO queue) is deleted and new one is stored.
 func (a *AtomicCache) Set(key string, data []byte, expire time.Duration) error {
+	// Reject data that is too large for any shard.
 	if len(data) > int(a.RecordSizeLarge) {
 		return ErrDataLimit
 	}
 
+	// Track whether this is a new record and whether garbage collection should run.
 	new := false
 	collectGarbage := false
+
+	// Select the appropriate shard section based on the data size.
 	shardSection, shardSectionID := a.getShardsSectionBySize(len(data))
 
-	a.Lock()
-	if val, ok := a.lookup[key]; !ok {
+	var (
+		exists bool
+		val    LookupRecord
+	)
+
+	// Take the read lock only to check whether the key already exists in the lookup table.
+	a.RLock()
+	val, exists = a.lookup[key]
+	a.RUnlock()
+
+	// Determine the expiration time: if KeepTTL is passed and the record exists,
+	// preserve the old expiration; otherwise calculate a new one.
+	var expireTime time.Time
+	if expire == KeepTTL && exists {
+		expireTime = val.Expiration
+	} else {
+		expireTime = a.getExprTime(expire)
+	}
+
+	if !exists {
+		// Key is new; a new record will be allocated below.
 		new = true
 	} else {
 		if val.ShardSection != shardSectionID {
+			// Key exists but the data size changed: the record belongs in a different
+			// shard section, so free the old record, store the data in the correct
+			// section and update the lookup entry.
+			a.Lock()
 			shardSection.shards[val.ShardIndex].Free(val.RecordIndex)
 			val.RecordIndex = shardSection.shards[val.ShardIndex].Set(data)
-			a.lookup[key] = LookupRecord{ShardIndex: val.ShardIndex, ShardSection: shardSectionID, RecordIndex: val.RecordIndex, Expiration: a.getExprTime(expire)}
+			a.lookup[key] = LookupRecord{ShardIndex: val.ShardIndex, ShardSection: shardSectionID, RecordIndex: val.RecordIndex, Expiration: expireTime}
+			a.Unlock()
 		} else {
-			prevShardSection := a.getShardsSectionByID(val.ShardSection)
-			prevShardSection.shards[val.ShardIndex].Free(val.RecordIndex)
-			new = true
+			// Key exists in the same section: update the existing record in place.
+			// Since the record still fits its shard section, it does not need to be
+			// freed first, which avoids an unnecessary free/allocate cycle.
+			a.Lock()
+			shardSection.shards[val.ShardIndex].Seti(val.RecordIndex, data)
+			a.Unlock()
 		}
 	}
 
 	if new {
+		// Allocate a new record: find a shard with free space, allocate a new shard,
+		// or buffer the request if everything is full.
+		a.Lock()
 		if si, ok := a.getShard(shardSectionID); ok {
+			// Found a shard with a free slot: store the data there and update the
+			// lookup table with the new record index.
 			ri := shardSection.shards[si].Set(data)
-			a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: a.getExprTime(expire)}
+			a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: expireTime}
+			a.Unlock()
 		} else if si, ok := a.getEmptyShard(shardSectionID); ok {
+			// No shard has free space: allocate a new shard in this section and store
+			// the record there. This is needed when all existing shards are full.
 			shardSection.shards[si] = NewShard(a.MaxRecords, a.getRecordSizeByShardSectionID(shardSectionID))
 			ri := shardSection.shards[si].Set(data)
-			a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: a.getExprTime(expire)}
+			a.lookup[key] = LookupRecord{ShardIndex: si, ShardSection: shardSectionID, RecordIndex: ri, Expiration: expireTime}
+			a.Unlock()
 		} else {
-			if len(a.buffer) <= int(a.MaxRecords) {
+			// All shards are full: buffer the request, or return an error if the
+			// buffer is full as well.
+			if len(a.buffer) < int(a.MaxRecords) {
+				// There is still space in the buffer, so queue the request instead of
+				// failing; garbage collection will free space for it later.
 				a.buffer = append(a.buffer, BufferItem{Key: key, Data: data, Expire: expire})
+				a.Unlock()
 			} else {
 				a.Unlock()
 				return ErrFullMemory
 			}
-
 			collectGarbage = true
 		}
 	}
-	a.Unlock()
 
+	// Trigger garbage collection if needed.
 	if (atomic.AddUint32(&a.GcCounter, 1) == a.GcStarter) || collectGarbage {
 		atomic.StoreUint32(&a.GcCounter, 0)
 		go a.collectGarbage()
