This repository was archived by the owner on Dec 17, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcache.py
More file actions
executable file
·359 lines (264 loc) · 14.2 KB
/
cache.py
File metadata and controls
executable file
·359 lines (264 loc) · 14.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
from common import eprint
from linkedlist import LinkedList
import math
from abc import ABC
import heapq
# Abstract Base class acting as template for cache implementations, and contains some common methods
class Cache(ABC):
    """Base template for cache simulators.

    Derives the address-decomposition geometry (offset, index and tag bit
    lengths) from the cache size, line size and kind/associativity, and keeps
    hit/miss counters. Concrete subclasses implement `step` with their
    replacement policy.
    """

    # Initialises cache state variables
    def __init__(self, name, size, line_size, kind, mem_addr_bit_length=64):
        """Initialise cache geometry.

        name                -- label used in debug output
        size                -- total cache capacity in bytes
        line_size           -- bytes per cache line
        kind                -- "direct", "full" or "<n>way" (e.g. "4way")
        mem_addr_bit_length -- width of simulated memory addresses (default 64)
        """
        self.name = name
        self.size = size
        self.line_size = line_size
        self.kind = kind
        self.num_cache_lines = int(size / line_size)
        self.mem_addr_bit_length = mem_addr_bit_length
        # Maps the kind of cache to its level of associativity
        self.way_associativity = self._map_kind_to_associativity()
        # Calculating index, tag and offset bit lengths - assuming memory is byte-addressable (opposed to word-addressable)
        self.offset_length = int(math.log2(line_size))
        self.number_of_sets = int(self.num_cache_lines / self.way_associativity)
        self.index_length = int(math.log2(self.number_of_sets))
        self.tag_length = int(mem_addr_bit_length - self.index_length - self.offset_length)
        # This cache simulator allows the user to work with memory addresses that aren't 64 bits, so need safety checks
        if self.tag_length <= 0:
            eprint(f"[Error] Memory address bit length ({mem_addr_bit_length}) is too small for implementing cache")
            eprint(f"[Error] Offset bit length = {self.offset_length}, Index bit length = {self.index_length}")
            return None

    # Resets cache state for processing trace file
    def reset_simulation(self):
        """Reset performance metric counters before processing a trace file."""
        self.hits = 0
        self.misses = 0

    # Maps the kind of cache to its level of associativity
    def _map_kind_to_associativity(self):
        """Return the number of ways implied by `self.kind`."""
        if self.kind == "direct":
            return 1  # one-way
        elif self.kind == "full":
            return self.num_cache_lines
        else:
            # nway - extracting 'n' value
            return int(self.kind[:-3])

    # Performs memory operation - W/R type ignored
    def step(self, mem_addr_int, mem_op_size):
        """Process one memory access; subclasses return True on hit, False on miss.

        BUG FIX: the original signature `step(tag, index, mem_op_size)` was
        missing `self` and did not match the `(self, mem_addr_int, mem_op_size)`
        signature that every subclass overrides it with.
        """
        pass
# Direct Mapped Cache Implementation
class DirectCache(Cache):
    """Direct-mapped cache: each address maps to exactly one cache line."""

    def __init__(self, name, size, line_size, kind, mem_addr_bit_length=64):
        super().__init__(name, size, line_size, kind, mem_addr_bit_length)
        self.reset_simulation()

    def reset_simulation(self):
        """Reset counters AND stored tags so a fresh trace starts from a cold cache.

        BUG FIX: previously only the inherited counter reset ran, so tags from a
        prior run survived a reset — inconsistent with RRCache/LRUCache/LFUCache,
        which all rebuild their runtime structures in their overrides.
        """
        super().reset_simulation()
        # tag_maintainer is an array data structure for storing cache tags during runtime,
        # which can be used to check for hits or misses (-1 marks an invalid/empty line)
        self.tag_maintainer = [-1] * self.num_cache_lines

    # Perform a step of the trace file
    def step(self, mem_addr_int, mem_op_size):
        """Process one memory access; return True on hit, False on miss."""
        # Extract index and tag
        index = int((mem_addr_int >> self.offset_length) & ((1 << self.index_length) - 1))
        tag = int((mem_addr_int >> (self.offset_length + self.index_length)) & ((1 << self.tag_length) - 1))
        # Determine where to start looking for tag in tag_maintainer (which is a contiguous data structure)
        target_cache_line_index = (index * self.way_associativity)
        # Check if cache line full/valid
        if self.tag_maintainer[target_cache_line_index] != -1:
            # Check if tags match
            if self.tag_maintainer[target_cache_line_index] == tag:
                self.hits = self.hits + 1
                eprint(f"[Debug - Cache {self.name}] Hit with Tag {tag}")
                return True
        self.misses = self.misses + 1
        eprint(f"[Debug - Cache {self.name}] Miss with Tag {tag}")
        self.tag_maintainer[target_cache_line_index] = tag  # replace existing tag with new tag
        return False
# Associative Caches
# Cache for supporting the Round Robin replacement policy
class RRCache(Cache):
    """Set-associative cache that evicts lines in round-robin (FIFO) order."""

    def __init__(self, name, size, line_size, kind, mem_addr_bit_length=64):
        super().__init__(name, size, line_size, kind, mem_addr_bit_length)
        self.reset_simulation()

    def reset_simulation(self):
        """Reset counters and rebuild the per-set runtime structures."""
        super().reset_simulation()
        # tag_maintainer is an array data structure for storing cache tags during runtime,
        # which can be used to check for hits or misses
        self.tag_maintainer = [None] * self.num_cache_lines
        # Due to the O(1) lookup time complexity of sets, I use them to check for cache hits/misses
        self.tag_sets = [set() for _ in range(self.number_of_sets)]
        # One counter pair per cache set:
        #   [ptr to cache line (in cache set) to be replaced, ptr to next free space in cache set]
        # The second value gives O(1) lookup time for empty cache lines so the cache
        # never has to perform a linear O(n) search — at the cost of more memory usage.
        self.victim_counters = [[0, 0] for _ in range(self.number_of_sets)]

    def _add_tag_to_cache(self, tag, line_index):
        """Store a tag in the given cache line slot."""
        self.tag_maintainer[line_index] = tag

    def _follow_replacement_policy(self, tag, base_index, set_index):
        """Evict this set's round-robin victim and install the new tag."""
        counters = self.victim_counters[set_index]
        victim_index = base_index + counters[0]
        # Drop the evicted tag from the set, then advance the victim pointer (wrapping)
        self.tag_sets[set_index].remove(self.tag_maintainer[victim_index])
        counters[0] = (counters[0] + 1) % self.way_associativity
        # Record the incoming tag in both structures
        self._add_tag_to_cache(tag, victim_index)
        self.tag_sets[set_index].add(tag)

    def step(self, mem_addr_int, mem_op_size):
        """Process one trace entry; return True on hit, False on miss."""
        # Decompose the address into tag and set index
        tag_int = int((mem_addr_int >> (self.offset_length + self.index_length)) & ((1 << self.tag_length) - 1))
        if self.index_length == 0:
            set_index = 0  # fully associative cache has only one set
        else:
            set_index = int((mem_addr_int >> self.offset_length) & ((1 << self.index_length) - 1))
        # First cache line of this set (the backing array is one contiguous block)
        start_index = set_index * self.way_associativity
        if tag_int in self.tag_sets[set_index]:
            # Cache hit
            self.hits += 1
            eprint(f"[Debug - Cache {self.name}] Hit with Tag {tag_int}")
            return True
        # Cache miss
        if len(self.tag_sets[set_index]) == self.way_associativity:
            # Set is full: evict per round-robin policy
            self._follow_replacement_policy(tag_int, start_index, set_index)
        else:
            # Still filling up: place the tag in the next empty slot (FIFO order)
            free_slot = self.victim_counters[set_index][1]
            self._add_tag_to_cache(tag_int, start_index + free_slot)
            self.victim_counters[set_index][1] = (free_slot + 1) % self.way_associativity
            self.tag_sets[set_index].add(tag_int)
        self.misses += 1
        eprint(f"[Debug - Cache {self.name}] Miss with Tag {tag_int}")
        return False
# Cache for supporting the Least Recently Used replacement policy
class LRUCache(Cache):
    """Set-associative cache evicting the least recently used line.

    A doubly linked list per set tracks access order (nodes = tag values),
    which allows easy retrieval and updating of cache positions without
    having to record their indices in the cache.
    """

    def __init__(self, name, size, line_size, kind, mem_addr_bit_length=64):
        super().__init__(name, size, line_size, kind, mem_addr_bit_length)
        self.reset_simulation()

    def reset_simulation(self):
        """Reset counters and rebuild the per-set access trackers and tag sets."""
        super().reset_simulation()
        self.access_order_tracker = [LinkedList(self.way_associativity) for _ in range(self.number_of_sets)]
        self.tag_sets = [set() for _ in range(self.number_of_sets)]

    def _follow_replacement_policy(self, target_tag, set_index):
        """Evict this set's LRU line and register the new tag as most recent."""
        tracker = self.access_order_tracker[set_index]
        evicted_tag = tracker.pop_cache_line()  # drops the least recently used tag
        self.tag_sets[set_index].remove(evicted_tag)
        self.tag_sets[set_index].add(target_tag)
        tracker.push_cache_line(target_tag)  # incoming line becomes most recently used

    def step(self, mem_addr_int, mem_op_size):
        """Process one trace entry; return True on hit, False on miss."""
        # Decompose the address into tag and set index
        tag_int = int((mem_addr_int >> (self.offset_length + self.index_length)) & ((1 << self.tag_length) - 1))
        if self.index_length == 0:
            set_index = 0  # fully associative cache has only one set
        else:
            set_index = int((mem_addr_int >> self.offset_length) & ((1 << self.index_length) - 1))
        if tag_int in self.tag_sets[set_index]:
            # Cache hit: move the tag to the front of the list (most recently accessed)
            self.hits += 1
            self.access_order_tracker[set_index].update_access_tracker(tag_int)
            eprint(f"[Debug - Cache {self.name}] Hit with Tag {tag_int}")
            return True
        # Cache miss
        if len(self.tag_sets[set_index]) == self.way_associativity:
            # Cache set is full: evict the least recently used line
            self._follow_replacement_policy(tag_int, set_index)
        else:
            # Register the new line in the access tracker and the tag set
            self.access_order_tracker[set_index].push_cache_line(tag_int)
            self.tag_sets[set_index].add(tag_int)
        self.misses += 1
        eprint(f"[Debug - Cache {self.name}] Miss with Tag {tag_int}")
        return False
# Cache for supporting the Least Frequently Used replacement policy
class LFUCache(Cache):
    """Set-associative cache evicting the line with the lowest hit frequency.

    Frequencies count hits only: a freshly inserted line starts at 0 and is
    incremented only when it is accessed again.
    """

    def __init__(self, name, size, line_size, kind, mem_addr_bit_length=64):
        super().__init__(name, size, line_size, kind, mem_addr_bit_length)
        self.reset_simulation()

    def reset_simulation(self):
        """Reset counters and rebuild the per-line and per-set runtime structures."""
        super().reset_simulation()
        # To remember tag order
        self.tag_maintainer = [None] * self.num_cache_lines
        self.frequency_tracker = [0] * self.num_cache_lines  # counter for every cache line frequency
        # Set to quickly identify whether miss or hit
        self.tag_sets = []
        self.tag_to_indices = []
        for _ in range(self.number_of_sets):
            self.tag_sets.append(set())
            self.tag_to_indices.append(dict())  # O(1) retrieval for updating the frequency of tag hits

    # Implementation of LFU replacement policy
    def _follow_replacement_policy(self, target_tag, set_index):
        """Evict the least frequently used line of this set and install target_tag."""
        start_index = (set_index * self.way_associativity)
        smallest_freq_seen = self.frequency_tracker[start_index]
        index_of_smallest_freq = start_index
        # Must loop through all cache lines of the set to find the minimum frequency
        for i in range(1, self.way_associativity):
            current_index = start_index + i
            freq = self.frequency_tracker[current_index]
            if freq < smallest_freq_seen:  # strict <, not <=, so ties evict the lowest index
                smallest_freq_seen = freq
                index_of_smallest_freq = current_index
        # Update tag set
        tag_to_remove = self.tag_maintainer[index_of_smallest_freq]
        self.tag_sets[set_index].remove(tag_to_remove)
        self.tag_sets[set_index].add(target_tag)
        # Update tag-to-index mapping
        del self.tag_to_indices[set_index][tag_to_remove]
        self.tag_to_indices[set_index][target_tag] = index_of_smallest_freq
        # Reset the frequency for the incoming line and store its tag
        self.frequency_tracker[index_of_smallest_freq] = 0
        self.tag_maintainer[index_of_smallest_freq] = target_tag

    # Step through trace file
    def step(self, mem_addr_int, mem_op_size):
        """Process one trace entry; return True on hit, False on miss."""
        # Extract tag and set index from the address
        tag_int = int((mem_addr_int >> (self.offset_length + self.index_length)) & ((1 << self.tag_length) - 1))
        if self.index_length == 0:  # fully associative cache
            set_index = 0
        else:
            set_index = int((mem_addr_int >> self.offset_length) & ((1 << self.index_length) - 1))
        if tag_int in self.tag_sets[set_index]:
            # Cache hit: bump this line's frequency.
            # Optimised with a dictionary since hits occur more often in caches than misses.
            self.hits = self.hits + 1
            self.frequency_tracker[self.tag_to_indices[set_index][tag_int]] += 1
            eprint(f"[Debug - Cache {self.name}] Hit with Tag {tag_int}")
            return True
        # Cache miss
        if len(self.tag_sets[set_index]) == self.way_associativity:
            # cache set is full
            self._follow_replacement_policy(tag_int, set_index)
        else:
            start_index = (set_index * self.way_associativity)
            # Defensive default; the guard above guarantees a free line exists,
            # so the loop always overwrites it (originally empty_index could be
            # referenced unbound if that invariant were ever violated).
            empty_index = start_index
            for i in range(self.way_associativity):  # linear search when filling up cache set -> only happens when filling up
                current_index = start_index + i
                if self.tag_maintainer[current_index] is None:  # BUG FIX: identity check 'is None', not '== None'
                    empty_index = current_index
                    break
            # Add to frequency tracker
            self.frequency_tracker[empty_index] = 0
            # Add to tag maintainer
            self.tag_maintainer[empty_index] = tag_int
            # Add new tag to set
            self.tag_sets[set_index].add(tag_int)
            # Add to tag-to-index tracker
            self.tag_to_indices[set_index][tag_int] = empty_index
        self.misses = self.misses + 1
        eprint(f"[Debug - Cache {self.name}] Miss with Tag {tag_int}")
        return False