HDF5  1.12.0
H5Cpkg.h
Go to the documentation of this file.
1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  * Copyright by The HDF Group. *
3  * Copyright by the Board of Trustees of the University of Illinois. *
4  * All rights reserved. *
5  * *
6  * This file is part of HDF5. The full HDF5 copyright notice, including *
7  * terms governing use, modification, and redistribution, is contained in *
8  * the COPYING file, which can be found at the root of the source code *
9  * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
10  * If you do not have access to either file, you may request a copy from *
11  * help@hdfgroup.org. *
12  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
13 
14 /*
15  * Programmer: John Mainzer -- 10/12/04
16  *
17  * Purpose: This file contains declarations which are normally visible
18  * only within the H5C package.
19  *
20  * Source files outside the H5C package should include
21  * H5Cprivate.h instead.
22  *
23  * The one exception to this rule is test/cache.c. The test
24  * code is easier to write if it can look at the cache's
25  * internal data structures. Indeed, this is the main
26  * reason why this file was created.
27  */
28 
29 #if !(defined H5C_FRIEND || defined H5C_MODULE)
30 #error "Do not include this file outside the H5C package!"
31 #endif
32 
33 #ifndef _H5Cpkg_H
34 #define _H5Cpkg_H
35 
36 /* Get package's private header */
37 #include "H5Cprivate.h"
38 
39 /* Other private headers needed by this file */
40 #include "H5Clog.h" /* Cache logging */
41 #include "H5SLprivate.h" /* Skip lists */
42 
43 /**************************/
44 /* Package Private Macros */
45 /**************************/
46 
/* Number of epoch markers active in the age-out cache size reduction
 * algorithm.
 */
#define H5C__MAX_EPOCH_MARKERS 10

/* Cache configuration settings.  The hash table length must be a power
 * of 2 so that (hash & (len - 1)) can be used to mask key values into
 * table indices.
 */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
#define H5C__H5C_T_MAGIC 0x005CAC0E

/* Initial allocated size of the "flush_dep_parent" array */
#define H5C_FLUSH_DEP_PARENT_INIT 8
56 
57 /****************************************************************************
58  *
59  * We maintain doubly linked lists of instances of H5C_cache_entry_t for a
60  * variety of reasons -- protected list, LRU list, and the clean and dirty
61  * LRU lists at present. The following macros support linking and unlinking
62  * of instances of H5C_cache_entry_t by both their regular and auxiliary next
63  * and previous pointers.
64  *
65  * The size and length fields are also maintained.
66  *
67  * Note that the relevant pair of prev and next pointers are presumed to be
68  * NULL on entry in the insertion macros.
69  *
70  * Finally, observe that the sanity checking macros evaluate to the empty
71  * string when H5C_DO_SANITY_CHECKS is FALSE. They also contain calls
72  * to the HGOTO_ERROR macro, which may not be appropriate in all cases.
73  * If so, we will need versions of the insertion and deletion macros which
74  * do not reference the sanity checking macros.
75  * JRM - 5/5/04
76  *
77  * Changes:
78  *
79  * - Removed the line:
80  *
81  * ( ( (Size) == (entry_ptr)->size ) && ( (len) != 1 ) ) ||
82  *
83  * from the H5C__DLL_PRE_REMOVE_SC macro. With the addition of the
84  * epoch markers used in the age out based cache size reduction algorithm,
85  * this invariant need not hold, as the epoch markers are of size 0.
86  *
87  * One could argue that I should have given the epoch markers a positive
88  * size, but this would break the index_size = LRU_list_size + pl_size
89  * + pel_size invariant.
90  *
91  * Alternatively, I could pass the current decr_mode in to the macro,
92  * and just skip the check whenever epoch markers may be in use.
93  *
94  * However, any size errors should be caught when the cache is flushed
95  * and destroyed. Until we are tracking such an error, this should be
96  * good enough.
97  * JRM - 12/9/04
98  *
99  *
100  * - In the H5C__DLL_PRE_INSERT_SC macro, replaced the lines:
101  *
102  * ( ( (len) == 1 ) &&
103  * ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||
104  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
105  * )
106  * ) ||
107  *
108  * with:
109  *
110  * ( ( (len) == 1 ) &&
111  * ( ( (head_ptr) != (tail_ptr) ) ||
112  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
113  * )
114  * ) ||
115  *
116  * Epoch markers have size 0, so we can now have a non-empty list with
117  * zero size. Hence the "( (Size) <= 0 )" clause caused false failures
118  * in the sanity check. Since "Size" is typically a size_t, it can't
119  * take on negative values, and thus the revised clause "( (Size) < 0 )"
120  * caused compiler warnings.
121  * JRM - 12/22/04
122  *
123  * - In the H5C__DLL_SC macro, replaced the lines:
124  *
125  * ( ( (len) == 1 ) &&
126  * ( ( (head_ptr) != (tail_ptr) ) || ( (cache_ptr)->size <= 0 ) ||
127  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
128  * )
129  * ) ||
130  *
131  * with
132  *
133  * ( ( (len) == 1 ) &&
134  * ( ( (head_ptr) != (tail_ptr) ) ||
135  * ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )
136  * )
137  * ) ||
138  *
139  * Epoch markers have size 0, so we can now have a non-empty list with
140  * zero size. Hence the "( (Size) <= 0 )" clause caused false failures
141  * in the sanity check. Since "Size" is typically a size_t, it can't
142  * take on negative values, and thus the revised clause "( (Size) < 0 )"
143  * caused compiler warnings.
144  * JRM - 1/10/05
145  *
146  * - Added the H5C__DLL_UPDATE_FOR_SIZE_CHANGE macro and the associated
147  * sanity checking macros. These macros are used to update the size of
148  * a DLL when one of its entries changes size.
149  *
150  * JRM - 9/8/05
151  *
152  * - Added macros supporting the index list -- a doubly linked list of
153  * all entries in the index. This list is necessary to reduce the
154  * cost of visiting all entries in the cache, which was previously
155  * done via a scan of the hash table.
156  *
157  * JRM - 10/15/15
158  *
159  ****************************************************************************/
160 
#if H5C_DO_SANITY_CHECKS

/* Sanity check a DLL before removing an entry from it.
 *
 * Note: the "(Size) == (entry_ptr)->size && (len) != 1" check was
 * deliberately dropped -- zero-size epoch markers make it a false
 * invariant (see the changelog above).
 */
#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
if ( ( (head_ptr) == NULL ) ||                                               \
     ( (tail_ptr) == NULL ) ||                                               \
     ( (entry_ptr) == NULL ) ||                                              \
     ( (len) <= 0 ) ||                                                       \
     ( (Size) < (entry_ptr)->size ) ||                                       \
     ( ( (entry_ptr)->prev == NULL ) && ( (head_ptr) != (entry_ptr) ) ) ||   \
     ( ( (entry_ptr)->next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) ||   \
     ( ( (len) == 1 ) &&                                                     \
       ( ! ( ( (head_ptr) == (entry_ptr) ) &&                                \
             ( (tail_ptr) == (entry_ptr) ) &&                                \
             ( (entry_ptr)->next == NULL ) &&                                \
             ( (entry_ptr)->prev == NULL ) &&                                \
             ( (Size) == (entry_ptr)->size )                                 \
           )                                                                 \
       )                                                                     \
     )                                                                       \
   ) {                                                                       \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre remove SC failed")     \
}

/* Sanity check a DLL as a whole: head/tail/len/Size must be mutually
 * consistent.  No "(Size) <= 0" check in the len == 1 clause -- epoch
 * markers have size 0, so a non-empty list may legitimately have zero
 * total size.
 */
#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv)                   \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) &&           \
       ( (head_ptr) != (tail_ptr) )                                      \
     ) ||                                                                \
     ( (len) < 0 ) ||                                                    \
     ( (Size) < 0 ) ||                                                   \
     ( ( (len) == 1 ) &&                                                 \
       ( ( (head_ptr) != (tail_ptr) ) ||                                 \
         ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )        \
       )                                                                 \
     ) ||                                                                \
     ( ( (len) >= 1 ) &&                                                 \
       ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) ||       \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL )          \
       )                                                                 \
     )                                                                   \
   ) {                                                                   \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL sanity check failed")  \
}

/* Sanity check a DLL and the entry about to be inserted into it.  The
 * entry's next/prev pointers are presumed NULL on entry.
 */
#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
if ( ( (entry_ptr) == NULL ) ||                                              \
     ( (entry_ptr)->next != NULL ) ||                                        \
     ( (entry_ptr)->prev != NULL ) ||                                        \
     ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) &&               \
       ( (head_ptr) != (tail_ptr) )                                          \
     ) ||                                                                    \
     ( ( (len) == 1 ) &&                                                     \
       ( ( (head_ptr) != (tail_ptr) ) ||                                     \
         ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )            \
       )                                                                     \
     ) ||                                                                    \
     ( ( (len) >= 1 ) &&                                                     \
       ( ( (head_ptr) == NULL ) || ( (head_ptr)->prev != NULL ) ||           \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->next != NULL )              \
       )                                                                     \
     )                                                                       \
   ) {                                                                       \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "DLL pre insert SC failed")     \
}

/* Sanity check the arguments of a pending DLL entry size change. */
#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)    \
if ( ( (dll_len) <= 0 ) ||                                                    \
     ( (dll_size) <= 0 ) ||                                                   \
     ( (old_size) <= 0 ) ||                                                   \
     ( (old_size) > (dll_size) ) ||                                           \
     ( (new_size) <= 0 ) ||                                                   \
     ( ( (dll_len) == 1 ) && ( (old_size) != (dll_size) ) ) ) {               \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL pre size update SC failed") \
}

/* Verify the list size is still consistent after an entry size change. */
#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)    \
if ( ( (new_size) > (dll_size) ) ||                                            \
     ( ( (dll_len) == 1 ) && ( (new_size) != (dll_size) ) ) ) {                \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "DLL post size update SC failed") \
}

#else /* H5C_DO_SANITY_CHECKS */

/* Sanity checks compile away to nothing when disabled. */
#define H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
#define H5C__DLL_SC(head_ptr, tail_ptr, len, Size, fv)
#define H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)
#define H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)
#define H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)

#endif /* H5C_DO_SANITY_CHECKS */
250 
251 
/* Append *entry_ptr to the tail of the DLL described by head_ptr /
 * tail_ptr / len / Size, updating the length and size totals.
 */
#define H5C__DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)  \
{                                                                            \
    H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size,         \
                           fail_val)                                         \
    if ( (head_ptr) == NULL )                                                \
    {                                                                        \
        (head_ptr) = (entry_ptr);                                            \
        (tail_ptr) = (entry_ptr);                                            \
    }                                                                        \
    else                                                                     \
    {                                                                        \
        (tail_ptr)->next  = (entry_ptr);                                     \
        (entry_ptr)->prev = (tail_ptr);                                      \
        (tail_ptr)        = (entry_ptr);                                     \
    }                                                                        \
    (len)++;                                                                 \
    (Size) += (entry_ptr)->size;                                             \
} /* H5C__DLL_APPEND() */

/* Prepend *entry_ptr to the head of the DLL.
 * Fixed: entry_ptr is now parenthesized in the size update, matching
 * H5C__DLL_APPEND (macro argument hygiene).
 */
#define H5C__DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
{                                                                            \
    H5C__DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size,         \
                           fail_val)                                         \
    if ( (head_ptr) == NULL )                                                \
    {                                                                        \
        (head_ptr) = (entry_ptr);                                            \
        (tail_ptr) = (entry_ptr);                                            \
    }                                                                        \
    else                                                                     \
    {                                                                        \
        (head_ptr)->prev  = (entry_ptr);                                     \
        (entry_ptr)->next = (head_ptr);                                      \
        (head_ptr)        = (entry_ptr);                                     \
    }                                                                        \
    (len)++;                                                                 \
    (Size) += (entry_ptr)->size;                                             \
} /* H5C__DLL_PREPEND() */

/* Unlink *entry_ptr from the DLL, NULLing its next/prev pointers and
 * updating the length and size totals.
 * Fixed: entry_ptr is now parenthesized throughout (macro argument
 * hygiene).
 */
#define H5C__DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)  \
{                                                                            \
    H5C__DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size,         \
                           fail_val)                                         \
    {                                                                        \
        if ( (head_ptr) == (entry_ptr) )                                     \
        {                                                                    \
            (head_ptr) = (entry_ptr)->next;                                  \
            if ( (head_ptr) != NULL )                                        \
                (head_ptr)->prev = NULL;                                     \
        }                                                                    \
        else                                                                 \
            (entry_ptr)->prev->next = (entry_ptr)->next;                     \
        if ( (tail_ptr) == (entry_ptr) )                                     \
        {                                                                    \
            (tail_ptr) = (entry_ptr)->prev;                                  \
            if ( (tail_ptr) != NULL )                                        \
                (tail_ptr)->next = NULL;                                     \
        }                                                                    \
        else                                                                 \
            (entry_ptr)->next->prev = (entry_ptr)->prev;                     \
        (entry_ptr)->next = NULL;                                            \
        (entry_ptr)->prev = NULL;                                            \
        (len)--;                                                             \
        (Size) -= (entry_ptr)->size;                                         \
    }                                                                        \
} /* H5C__DLL_REMOVE() */

/* Adjust the size total of a DLL when one of its entries changes size
 * from old_size to new_size.
 */
#define H5C__DLL_UPDATE_FOR_SIZE_CHANGE(dll_len, dll_size, old_size, new_size) \
{                                                                              \
    H5C__DLL_PRE_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)         \
    (dll_size) -= (old_size);                                                  \
    (dll_size) += (new_size);                                                  \
    H5C__DLL_POST_SIZE_UPDATE_SC(dll_len, dll_size, old_size, new_size)        \
} /* H5C__DLL_UPDATE_FOR_SIZE_CHANGE() */
325 
#if H5C_DO_SANITY_CHECKS

/* Sanity check an auxiliary DLL (linked via aux_next / aux_prev)
 * before removing an entry from it.  Unlike the regular DLL variant,
 * the "(Size) == (entry_ptr)->size implies (len) == 1" check is kept
 * here -- zero-size epoch markers never appear on the aux lists.
 */
#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (hd_ptr) == NULL ) ||                                                   \
     ( (tail_ptr) == NULL ) ||                                                 \
     ( (entry_ptr) == NULL ) ||                                                \
     ( (len) <= 0 ) ||                                                         \
     ( (Size) < (entry_ptr)->size ) ||                                         \
     ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) ||            \
     ( ( (entry_ptr)->aux_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) ||   \
     ( ( (entry_ptr)->aux_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
     ( ( (len) == 1 ) &&                                                       \
       ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) &&   \
             ( (entry_ptr)->aux_next == NULL ) &&                              \
             ( (entry_ptr)->aux_prev == NULL ) &&                              \
             ( (Size) == (entry_ptr)->size )                                   \
           )                                                                   \
       )                                                                       \
     )                                                                         \
   ) {                                                                         \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "aux DLL pre remove SC failed")   \
}

/* Sanity check an auxiliary DLL as a whole. */
#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv)                   \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) &&               \
       ( (head_ptr) != (tail_ptr) )                                          \
     ) ||                                                                    \
     ( (len) < 0 ) ||                                                        \
     ( (Size) < 0 ) ||                                                       \
     ( ( (len) == 1 ) &&                                                     \
       ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||                  \
         ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )            \
       )                                                                     \
     ) ||                                                                    \
     ( ( (len) >= 1 ) &&                                                     \
       ( ( (head_ptr) == NULL ) || ( (head_ptr)->aux_prev != NULL ) ||       \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL )          \
       )                                                                     \
     )                                                                       \
   ) {                                                                       \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL sanity check failed")  \
}

/* Sanity check an auxiliary DLL and the entry about to be inserted.
 * The entry's aux_next / aux_prev pointers are presumed NULL on entry.
 */
#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (entry_ptr) == NULL ) ||                                                \
     ( (entry_ptr)->aux_next != NULL ) ||                                      \
     ( (entry_ptr)->aux_prev != NULL ) ||                                      \
     ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) &&                   \
       ( (hd_ptr) != (tail_ptr) )                                              \
     ) ||                                                                      \
     ( ( (len) == 1 ) &&                                                       \
       ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||                      \
         ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) )                  \
       )                                                                       \
     ) ||                                                                      \
     ( ( (len) >= 1 ) &&                                                       \
       ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->aux_prev != NULL ) ||             \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->aux_next != NULL )            \
       )                                                                       \
     )                                                                         \
   ) {                                                                         \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "AUX DLL pre insert SC failed")   \
}

#else /* H5C_DO_SANITY_CHECKS */

/* Sanity checks compile away to nothing when disabled. */
#define H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
#define H5C__AUX_DLL_SC(head_ptr, tail_ptr, len, Size, fv)
#define H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)

#endif /* H5C_DO_SANITY_CHECKS */
397 
398 
/* Append *entry_ptr to the tail of an auxiliary DLL (linked via
 * aux_next / aux_prev), updating the length and size totals.
 * Fixed: entry_ptr is now parenthesized in the size update (macro
 * argument hygiene).
 */
#define H5C__AUX_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\
{                                                                              \
    H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size,       \
                               fail_val)                                       \
    if ( (head_ptr) == NULL )                                                  \
    {                                                                          \
        (head_ptr) = (entry_ptr);                                              \
        (tail_ptr) = (entry_ptr);                                              \
    }                                                                          \
    else                                                                       \
    {                                                                          \
        (tail_ptr)->aux_next  = (entry_ptr);                                   \
        (entry_ptr)->aux_prev = (tail_ptr);                                    \
        (tail_ptr)            = (entry_ptr);                                   \
    }                                                                          \
    (len)++;                                                                   \
    (Size) += (entry_ptr)->size;                                               \
} /* H5C__AUX_DLL_APPEND() */

/* Prepend *entry_ptr to the head of an auxiliary DLL.
 * Fixed: entry_ptr parenthesized in the size update.
 */
#define H5C__AUX_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv)   \
{                                                                            \
    H5C__AUX_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
    if ( (head_ptr) == NULL )                                                \
    {                                                                        \
        (head_ptr) = (entry_ptr);                                            \
        (tail_ptr) = (entry_ptr);                                            \
    }                                                                        \
    else                                                                     \
    {                                                                        \
        (head_ptr)->aux_prev  = (entry_ptr);                                 \
        (entry_ptr)->aux_next = (head_ptr);                                  \
        (head_ptr)            = (entry_ptr);                                 \
    }                                                                        \
    (len)++;                                                                 \
    (Size) += (entry_ptr)->size;                                             \
} /* H5C__AUX_DLL_PREPEND() */

/* Unlink *entry_ptr from an auxiliary DLL, NULLing its aux pointers
 * and updating the length and size totals.
 * Fixed: entry_ptr parenthesized throughout.
 */
#define H5C__AUX_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv)    \
{                                                                            \
    H5C__AUX_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
    {                                                                        \
        if ( (head_ptr) == (entry_ptr) )                                     \
        {                                                                    \
            (head_ptr) = (entry_ptr)->aux_next;                              \
            if ( (head_ptr) != NULL )                                        \
                (head_ptr)->aux_prev = NULL;                                 \
        }                                                                    \
        else                                                                 \
            (entry_ptr)->aux_prev->aux_next = (entry_ptr)->aux_next;         \
        if ( (tail_ptr) == (entry_ptr) )                                     \
        {                                                                    \
            (tail_ptr) = (entry_ptr)->aux_prev;                              \
            if ( (tail_ptr) != NULL )                                        \
                (tail_ptr)->aux_next = NULL;                                 \
        }                                                                    \
        else                                                                 \
            (entry_ptr)->aux_next->aux_prev = (entry_ptr)->aux_prev;         \
        (entry_ptr)->aux_next = NULL;                                        \
        (entry_ptr)->aux_prev = NULL;                                        \
        (len)--;                                                             \
        (Size) -= (entry_ptr)->size;                                         \
    }                                                                        \
} /* H5C__AUX_DLL_REMOVE() */
462 
#if H5C_DO_SANITY_CHECKS

/* Sanity check the index list (linked via il_next / il_prev) before
 * removing an entry from it.  The HDassert() gives an immediate,
 * debuggable failure before the HGOTO_ERROR path runs.
 */
#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (hd_ptr) == NULL ) ||                                                  \
     ( (tail_ptr) == NULL ) ||                                                \
     ( (entry_ptr) == NULL ) ||                                               \
     ( (len) <= 0 ) ||                                                        \
     ( (Size) < (entry_ptr)->size ) ||                                        \
     ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) ||           \
     ( ( (entry_ptr)->il_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) ||   \
     ( ( (entry_ptr)->il_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
     ( ( (len) == 1 ) &&                                                      \
       ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) &&  \
             ( (entry_ptr)->il_next == NULL ) &&                              \
             ( (entry_ptr)->il_prev == NULL ) &&                              \
             ( (Size) == (entry_ptr)->size )                                  \
           )                                                                  \
       )                                                                      \
     )                                                                        \
   ) {                                                                        \
    HDassert(0 && "il DLL pre remove SC failed");                             \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "il DLL pre remove SC failed")   \
}

/* Sanity check the index list and the entry about to be inserted.
 * The entry's il_next / il_prev pointers are presumed NULL on entry.
 */
#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (entry_ptr) == NULL ) ||                                               \
     ( (entry_ptr)->il_next != NULL ) ||                                      \
     ( (entry_ptr)->il_prev != NULL ) ||                                      \
     ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) &&                  \
       ( (hd_ptr) != (tail_ptr) )                                             \
     ) ||                                                                     \
     ( ( (len) == 1 ) &&                                                      \
       ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) ||                     \
         ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) )                 \
       )                                                                      \
     ) ||                                                                     \
     ( ( (len) >= 1 ) &&                                                      \
       ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->il_prev != NULL ) ||             \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL )            \
       )                                                                      \
     )                                                                        \
   ) {                                                                        \
    HDassert(0 && "IL DLL pre insert SC failed");                             \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL pre insert SC failed")   \
}

/* Sanity check the index list as a whole. */
#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv)                   \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) &&              \
       ( (head_ptr) != (tail_ptr) )                                         \
     ) ||                                                                   \
     ( ( (len) == 1 ) &&                                                    \
       ( ( (head_ptr) != (tail_ptr) ) ||                                    \
         ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) )           \
       )                                                                    \
     ) ||                                                                   \
     ( ( (len) >= 1 ) &&                                                    \
       ( ( (head_ptr) == NULL ) || ( (head_ptr)->il_prev != NULL ) ||       \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL )          \
       )                                                                    \
     )                                                                      \
   ) {                                                                      \
    HDassert(0 && "IL DLL sanity check failed");                            \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL sanity check failed")  \
}

#else /* H5C_DO_SANITY_CHECKS */

/* Sanity checks compile away to nothing when disabled. */
#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv)

#endif /* H5C_DO_SANITY_CHECKS */
535 
536 
/* Append *entry_ptr to the tail of the index list (linked via il_next /
 * il_prev), then re-check the whole list.
 * Fixed: entry_ptr is now parenthesized in the size update (macro
 * argument hygiene).
 */
#define H5C__IL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\
{                                                                             \
    H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size,       \
                              fail_val)                                       \
    if ( (head_ptr) == NULL )                                                 \
    {                                                                         \
        (head_ptr) = (entry_ptr);                                            \
        (tail_ptr) = (entry_ptr);                                            \
    }                                                                         \
    else                                                                      \
    {                                                                         \
        (tail_ptr)->il_next  = (entry_ptr);                                   \
        (entry_ptr)->il_prev = (tail_ptr);                                    \
        (tail_ptr)           = (entry_ptr);                                   \
    }                                                                         \
    (len)++;                                                                  \
    (Size) += (entry_ptr)->size;                                              \
    H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fail_val)                   \
} /* H5C__IL_DLL_APPEND() */

/* Unlink *entry_ptr from the index list, NULLing its il pointers, then
 * re-check the whole list.
 * Fixed: entry_ptr parenthesized throughout.
 */
#define H5C__IL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv)    \
{                                                                           \
    H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
    {                                                                       \
        if ( (head_ptr) == (entry_ptr) )                                    \
        {                                                                   \
            (head_ptr) = (entry_ptr)->il_next;                              \
            if ( (head_ptr) != NULL )                                       \
                (head_ptr)->il_prev = NULL;                                 \
        }                                                                   \
        else                                                                \
            (entry_ptr)->il_prev->il_next = (entry_ptr)->il_next;           \
        if ( (tail_ptr) == (entry_ptr) )                                    \
        {                                                                   \
            (tail_ptr) = (entry_ptr)->il_prev;                              \
            if ( (tail_ptr) != NULL )                                       \
                (tail_ptr)->il_next = NULL;                                 \
        }                                                                   \
        else                                                                \
            (entry_ptr)->il_next->il_prev = (entry_ptr)->il_prev;           \
        (entry_ptr)->il_next = NULL;                                        \
        (entry_ptr)->il_prev = NULL;                                        \
        (len)--;                                                            \
        (Size) -= (entry_ptr)->size;                                        \
    }                                                                       \
    H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv)                       \
} /* H5C__IL_DLL_REMOVE() */
584 
585 
586 /***********************************************************************
587  *
588  * Stats collection macros
589  *
590  * The following macros must handle stats collection when this collection
591  * is enabled, and evaluate to the empty string when it is not.
592  *
593  * The sole exception to this rule is
594  * H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as
595  * the cache hit rate stats are always collected and available.
596  *
597  ***********************************************************************/
598 
/* Update the cache hit rate stats.  Unlike the other stats macros,
 * this one is always active -- hit rate stats are collected
 * unconditionally.
 *
 * Fixed: cache_ptr is now parenthesized before dereference (macro
 * argument hygiene).
 *
 * NOTE(review): this is a multi-statement macro that is NOT wrapped in
 * do { } while(0), so it is unsafe in an unbraced if/else body.  Left
 * as-is to preserve existing call sites, which may invoke it without a
 * trailing semicolon -- confirm before restructuring.
 */
#define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \
    ((cache_ptr)->cache_accesses)++;                     \
    if ( hit ) {                                         \
        ((cache_ptr)->cache_hits)++;                     \
    }
604 
605 #if H5C_COLLECT_CACHE_STATS
606 
/* Roll the current index sizes (total / clean / dirty) into their
 * respective high-water marks.
 *
 * NOTE: deliberately not wrapped in do { } while(0) -- other stats
 * macros in this file invoke it mid-body without a trailing semicolon.
 */
#define H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr)                \
    if ( (cache_ptr)->index_size > (cache_ptr)->max_index_size )   \
        (cache_ptr)->max_index_size = (cache_ptr)->index_size;    \
    if ( (cache_ptr)->clean_index_size >                           \
         (cache_ptr)->max_clean_index_size )                       \
        (cache_ptr)->max_clean_index_size =                        \
            (cache_ptr)->clean_index_size;                         \
    if ( (cache_ptr)->dirty_index_size >                           \
         (cache_ptr)->max_dirty_index_size )                       \
        (cache_ptr)->max_dirty_index_size =                        \
            (cache_ptr)->dirty_index_size;
618 
/* Record a pin of a dirty entry, indexed by entry type. */
#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr) \
    (((cache_ptr)->dirty_pins)[(entry_ptr)->type->id])++;

/* On unprotect, roll the skip list and protected entry list lengths
 * and sizes into their high-water marks.
 */
#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)                 \
    if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len )     \
        (cache_ptr)->max_slist_len = (cache_ptr)->slist_len;       \
    if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size )   \
        (cache_ptr)->max_slist_size = (cache_ptr)->slist_size;     \
    if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len )         \
        (cache_ptr)->max_pel_len = (cache_ptr)->pel_len;           \
    if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size )       \
        (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;
631 
/* Record the relocation of an entry, noting whether it occurred during
 * a cache flush and/or a flush of the entry itself.
 * Fixed: cache_ptr / entry_ptr are now parenthesized in the
 * flush_in_progress tests (macro argument hygiene).
 */
#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)               \
    if ( (cache_ptr)->flush_in_progress )                              \
        ((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++;     \
    if ( (entry_ptr)->flush_in_progress )                              \
        ((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++;     \
    (((cache_ptr)->moves)[(entry_ptr)->type->id])++;                   \
    (cache_ptr)->entries_relocated_counter++;

/* Record an entry size change; on growth, refresh the index / skip
 * list / protected list size high-water marks.
 * Fixed: same parenthesization as above.
 */
#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\
    if ( (cache_ptr)->flush_in_progress )                                      \
        ((cache_ptr)->cache_flush_size_changes[(entry_ptr)->type->id])++;      \
    if ( (entry_ptr)->flush_in_progress )                                      \
        ((cache_ptr)->entry_flush_size_changes[(entry_ptr)->type->id])++;      \
    if ( (entry_ptr)->size < (new_size) ) {                                    \
        ((cache_ptr)->size_increases[(entry_ptr)->type->id])++;                \
        H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr)                            \
        if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size )           \
            (cache_ptr)->max_slist_size = (cache_ptr)->slist_size;             \
        if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size )                 \
            (cache_ptr)->max_pl_size = (cache_ptr)->pl_size;                   \
    } else if ( (entry_ptr)->size > (new_size) ) {                             \
        ((cache_ptr)->size_decreases[(entry_ptr)->type->id])++;                \
    }
655 
/* Count a hash table insertion. */
#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
    (cache_ptr)->total_ht_insertions++;

/* Count a hash table deletion. */
#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
    (cache_ptr)->total_ht_deletions++;

/* Record the outcome and chain depth of a hash table search. */
#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth)  \
    if ( success ) {                                                \
        (cache_ptr)->successful_ht_searches++;                      \
        (cache_ptr)->total_successful_ht_search_depth += depth;     \
    } else {                                                        \
        (cache_ptr)->failed_ht_searches++;                          \
        (cache_ptr)->total_failed_ht_search_depth += depth;         \
    }

/* Count an entry unpin, indexed by entry type. */
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \
    ((cache_ptr)->unpins)[(entry_ptr)->type->id]++;

/* Count a restart of a skip list scan. */
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) \
    ((cache_ptr)->slist_scan_restarts)++;

/* Count a restart of an LRU scan. */
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) \
    ((cache_ptr)->LRU_scan_restarts)++;

/* Count a restart of an index scan. */
#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) \
    ((cache_ptr)->index_scan_restarts)++;
682 
/* Count the creation of a cache image. */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \
{                                                           \
    (cache_ptr)->images_created++;                          \
}

/* Count a cache image read. */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \
{                                                         \
    /* make sure image len is still good */               \
    HDassert((cache_ptr)->image_len > 0);                 \
    (cache_ptr)->images_read++;                           \
}

/* Count a cache image load and remember the size of the last image
 * loaded.
 */
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)       \
{                                                               \
    /* make sure image len is still good */                     \
    HDassert((cache_ptr)->image_len > 0);                       \
    (cache_ptr)->images_loaded++;                               \
    (cache_ptr)->last_image_size = (cache_ptr)->image_len;      \
}

/* Count a prefetch, noting whether the prefetched entry was dirty. */
#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \
{                                                        \
    (cache_ptr)->prefetches++;                           \
    if ( dirty )                                         \
        (cache_ptr)->dirty_prefetches++;                 \
}

/* Count a hit on a prefetched entry. */
#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \
{                                                     \
    (cache_ptr)->prefetch_hits++;                     \
}
714 
715 #if H5C_COLLECT_CACHE_ENTRY_STATS
716 
/* Zero the per-entry stats fields of *entry_ptr. */
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \
{                                               \
    (entry_ptr)->accesses = 0;                  \
    (entry_ptr)->clears   = 0;                  \
    (entry_ptr)->flushes  = 0;                  \
    (entry_ptr)->pins     = 0;                  \
}

/* Count a clear of *entry_ptr, in the per-type and per-entry stats. */
#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)           \
{                                                                   \
    (((cache_ptr)->clears)[(entry_ptr)->type->id])++;               \
    if ( (entry_ptr)->is_pinned )                                   \
        (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++;    \
    ((entry_ptr)->clears)++;                                        \
}

/* Count a flush of *entry_ptr, in the per-type and per-entry stats. */
#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)           \
{                                                                   \
    (((cache_ptr)->flushes)[(entry_ptr)->type->id])++;              \
    if ( (entry_ptr)->is_pinned )                                   \
        (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++;   \
    ((entry_ptr)->flushes)++;                                       \
}

/* Record an eviction (or transfer of ownership) of *entry_ptr, and
 * fold its per-entry stats into the per-type min/max tallies.
 */
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
{                                                                            \
    if ( take_ownership )                                                    \
        (((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++;           \
    else                                                                     \
        (((cache_ptr)->evictions)[(entry_ptr)->type->id])++;                 \
    if ( (entry_ptr)->accesses >                                             \
         ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] )                \
        ((cache_ptr)->max_accesses)[(entry_ptr)->type->id] =                 \
            (entry_ptr)->accesses;                                           \
    if ( (entry_ptr)->accesses <                                             \
         ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] )                \
        ((cache_ptr)->min_accesses)[(entry_ptr)->type->id] =                 \
            (entry_ptr)->accesses;                                           \
    if ( (entry_ptr)->clears >                                               \
         ((cache_ptr)->max_clears)[(entry_ptr)->type->id] )                  \
        ((cache_ptr)->max_clears)[(entry_ptr)->type->id]                     \
            = (entry_ptr)->clears;                                           \
    if ( (entry_ptr)->flushes >                                              \
         ((cache_ptr)->max_flushes)[(entry_ptr)->type->id] )                 \
        ((cache_ptr)->max_flushes)[(entry_ptr)->type->id]                    \
            = (entry_ptr)->flushes;                                          \
    if ( (entry_ptr)->size >                                                 \
         ((cache_ptr)->max_size)[(entry_ptr)->type->id] )                    \
        ((cache_ptr)->max_size)[(entry_ptr)->type->id]                       \
            = (entry_ptr)->size;                                             \
    if ( (entry_ptr)->pins >                                                 \
         ((cache_ptr)->max_pins)[(entry_ptr)->type->id] )                    \
        ((cache_ptr)->max_pins)[(entry_ptr)->type->id]                       \
            = (entry_ptr)->pins;                                             \
}

/* Record an insertion of *entry_ptr and refresh the index / skip list /
 * protected entry list high-water marks.
 * Fixed: cache_ptr is now parenthesized in the inserted-counter update
 * (macro argument hygiene).
 */
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)           \
{                                                                       \
    (((cache_ptr)->insertions)[(entry_ptr)->type->id])++;               \
    if ( (entry_ptr)->is_pinned ) {                                     \
        (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++;    \
        ((cache_ptr)->pins)[(entry_ptr)->type->id]++;                   \
        (entry_ptr)->pins++;                                            \
        if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len )          \
            (cache_ptr)->max_pel_len = (cache_ptr)->pel_len;            \
        if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size )        \
            (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;          \
    }                                                                   \
    if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len )          \
        (cache_ptr)->max_index_len = (cache_ptr)->index_len;            \
    H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr)                         \
    if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len )          \
        (cache_ptr)->max_slist_len = (cache_ptr)->slist_len;            \
    if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size )        \
        (cache_ptr)->max_slist_size = (cache_ptr)->slist_size;          \
    if ( (entry_ptr)->size >                                            \
         ((cache_ptr)->max_size)[(entry_ptr)->type->id] )               \
        ((cache_ptr)->max_size)[(entry_ptr)->type->id]                  \
            = (entry_ptr)->size;                                        \
    (cache_ptr)->entries_inserted_counter++;                            \
}

/* Record a protect of *entry_ptr (hit or miss, read or write) and
 * refresh the index / protected list high-water marks.
 */
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)             \
{                                                                            \
    if ( hit )                                                               \
        ((cache_ptr)->hits)[(entry_ptr)->type->id]++;                        \
    else                                                                     \
        ((cache_ptr)->misses)[(entry_ptr)->type->id]++;                      \
    if ( ! ((entry_ptr)->is_read_only) ) {                                   \
        ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++;              \
    } else {                                                                 \
        ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++;               \
        if ( ((entry_ptr)->ro_ref_count) >                                   \
             ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] )       \
            ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] =        \
                ((entry_ptr)->ro_ref_count);                                 \
    }                                                                        \
    if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len )               \
        (cache_ptr)->max_index_len = (cache_ptr)->index_len;                 \
    H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr)                              \
    if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len )                     \
        (cache_ptr)->max_pl_len = (cache_ptr)->pl_len;                       \
    if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size )                   \
        (cache_ptr)->max_pl_size = (cache_ptr)->pl_size;                     \
    if ( (entry_ptr)->size >                                                 \
         ((cache_ptr)->max_size)[(entry_ptr)->type->id] )                    \
        ((cache_ptr)->max_size)[(entry_ptr)->type->id] = (entry_ptr)->size;  \
    ((entry_ptr)->accesses)++;                                               \
}

/* Record a pin of *entry_ptr and refresh the protected entry list
 * high-water marks.
 */
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)          \
{                                                                \
    ((cache_ptr)->pins)[(entry_ptr)->type->id]++;                \
    (entry_ptr)->pins++;                                         \
    if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len )       \
        (cache_ptr)->max_pel_len = (cache_ptr)->pel_len;         \
    if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size )     \
        (cache_ptr)->max_pel_size = (cache_ptr)->pel_size;       \
}
836 
837 #else /* H5C_COLLECT_CACHE_ENTRY_STATS */
838 
839 #define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
840 
841 #define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr) \
842 { \
843  (((cache_ptr)->clears)[(entry_ptr)->type->id])++; \
844  if((entry_ptr)->is_pinned) \
845  (((cache_ptr)->pinned_clears)[(entry_ptr)->type->id])++; \
846 }
847 
848 #define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr) \
849 { \
850  (((cache_ptr)->flushes)[(entry_ptr)->type->id])++; \
851  if ( (entry_ptr)->is_pinned ) \
852  (((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \
853 }
854 
855 #define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
856 { \
857  if ( take_ownership ) \
858  (((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \
859  else \
860  (((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
861 }
862 
/* Update stats when an entry is inserted into the cache: bump the per-type
 * insertion count (and pin bookkeeping if the entry arrives pinned), then
 * refresh the index, slist, and pinned-entry-list high-water marks, and
 * advance the monotonic insertion counter.
 *
 * Fix: parenthesize the final use of the cache_ptr macro parameter
 * ((cache_ptr)->entries_inserted_counter) -- it was the only unparenthesized
 * parameter reference in these macros and would mis-expand for non-primary
 * argument expressions (CERT PRE01-C).
 */
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
{ \
    (((cache_ptr)->insertions)[(entry_ptr)->type->id])++; \
    if ( (entry_ptr)->is_pinned ) { \
        (((cache_ptr)->pinned_insertions)[(entry_ptr)->type->id])++; \
        ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
        if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
            (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
        if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
            (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
    } \
    if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
        (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
    H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
    if ( (cache_ptr)->slist_len > (cache_ptr)->max_slist_len ) \
        (cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
    if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
        (cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
    (cache_ptr)->entries_inserted_counter++; \
}
883 
/* Update stats when an entry is protected: record hit/miss per type,
 * classify the protect as read or write, track the maximum read-only
 * reference count, and refresh the index and protected-list (pl)
 * high-water marks.
 */
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
{ \
    if ( hit ) \
        ((cache_ptr)->hits)[(entry_ptr)->type->id]++; \
    else \
        ((cache_ptr)->misses)[(entry_ptr)->type->id]++; \
    if ( ! ((entry_ptr)->is_read_only) ) \
        ((cache_ptr)->write_protects)[(entry_ptr)->type->id]++; \
    else { \
        ((cache_ptr)->read_protects)[(entry_ptr)->type->id]++; \
        if ( ((entry_ptr)->ro_ref_count) > \
             ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] ) \
            ((cache_ptr)->max_read_protects)[(entry_ptr)->type->id] = \
                ((entry_ptr)->ro_ref_count); \
    } \
    if ( (cache_ptr)->index_len > (cache_ptr)->max_index_len ) \
        (cache_ptr)->max_index_len = (cache_ptr)->index_len; \
    H5C__UPDATE_MAX_INDEX_SIZE_STATS(cache_ptr) \
    if ( (cache_ptr)->pl_len > (cache_ptr)->max_pl_len ) \
        (cache_ptr)->max_pl_len = (cache_ptr)->pl_len; \
    if ( (cache_ptr)->pl_size > (cache_ptr)->max_pl_size ) \
        (cache_ptr)->max_pl_size = (cache_ptr)->pl_size; \
}

/* Update stats when an entry is pinned (version without per-entry stats:
 * identical to the H5C_COLLECT_CACHE_ENTRY_STATS variant except that
 * (entry_ptr)->pins is not maintained).
 */
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr) \
{ \
    ((cache_ptr)->pins)[(entry_ptr)->type->id]++; \
    if ( (cache_ptr)->pel_len > (cache_ptr)->max_pel_len ) \
        (cache_ptr)->max_pel_len = (cache_ptr)->pel_len; \
    if ( (cache_ptr)->pel_size > (cache_ptr)->max_pel_size ) \
        (cache_ptr)->max_pel_size = (cache_ptr)->pel_size; \
}
916 
917 #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
918 
919 #else /* H5C_COLLECT_CACHE_STATS */
920 
/* Cache statistics collection disabled (H5C_COLLECT_CACHE_STATS off):
 * every stats macro expands to nothing so the hot paths carry no cost.
 */
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
#define H5C__UPDATE_STATS_FOR_DIRTY_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
#define H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)
#define H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr)
#define H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr)
#define H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, success, depth)
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr)
#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)
#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty)
#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
944 
945 #endif /* H5C_COLLECT_CACHE_STATS */
946 
947 
948 /***********************************************************************
949  *
950  * Hash table access and manipulation macros:
951  *
952  * The following macros handle searches, insertions, and deletion in
953  * the hash table.
954  *
955  * When modifying these macros, remember to modify the similar macros
 * in test/cache.c
957  *
958  * Changes:
959  *
960  * - Updated existing index macros and sanity check macros to maintain
961  * the clean_index_size and dirty_index_size fields of H5C_t. Also
962  * added macros to allow us to track entry cleans and dirties.
963  *
964  * JRM -- 11/5/08
965  *
966  * - Updated existing index macros and sanity check macros to maintain
967  * the index_ring_len, index_ring_size, clean_index_ring_size, and
968  * dirty_index_ring_size fields of H5C_t.
969  *
970  * JRM -- 9/1/15
971  *
972  * - Updated existing index macros and sanity checks macros to
 * maintain a doubly linked list of all entries in the index.
974  * This is necessary to reduce the computational cost of visiting
975  * all entries in the index, which used to be done by scanning
976  * the hash table.
977  *
978  * JRM -- 10/15/15
979  *
980  ***********************************************************************/
981 
/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It must be a power of two. */
983 
/* Mask selecting the address bits used for hashing; depends on
 * H5C__HASH_TABLE_LEN being a power of two.  The low 3 bits are skipped
 * (<< 3 / >> 3) -- presumably because entry addresses share their low
 * bits due to alignment; NOTE(review): confirm rationale.
 */
#define H5C__HASH_MASK ((size_t)(H5C__HASH_TABLE_LEN - 1) << 3)

/* Map a file address x to a hash-table bucket in [0, H5C__HASH_TABLE_LEN). */
#define H5C__HASH_FCN(x) (int)((unsigned)((x) & H5C__HASH_MASK) >> 3)
987 
988 #if H5C_DO_SANITY_CHECKS
989 
/* Sanity checks run before inserting an entry into the hash table:
 * valid cache and entry pointers, defined address, hash links still NULL,
 * hash value in range, and global / per-ring index length+size bookkeeping
 * (including the index list il_len/il_size shadow counts) all consistent.
 * On failure: assert in debug builds, then report via HGOTO_ERROR.
 */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
     ( (entry_ptr) == NULL ) || \
     ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
     ( (entry_ptr)->ht_next != NULL ) || \
     ( (entry_ptr)->ht_prev != NULL ) || \
     ( (entry_ptr)->size <= 0 ) || \
     ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
     ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + \
        (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
     ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
}

/* Sanity checks run after a hash table insertion: the entry's ring must
 * now be non-empty and all global / per-ring / index-list bookkeeping
 * must still balance.
 */
#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + \
        (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] == 0 ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
}
1041 
/* Sanity checks run before removing an entry from the hash table:
 * the entry must actually be reachable in its bucket (bucket head iff
 * ht_prev is NULL), its ring must be populated and large enough to
 * account for the entry, and all index bookkeeping must balance.
 */
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
     ( (cache_ptr)->index_len < 1 ) || \
     ( (entry_ptr) == NULL ) || \
     ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
     ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
     ( (entry_ptr)->size <= 0 ) || \
     ( H5C__HASH_FCN((entry_ptr)->addr) < 0 ) || \
     ( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
     ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
       == NULL ) || \
     ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] \
       != (entry_ptr) ) && \
       ( (entry_ptr)->ht_prev == NULL ) ) || \
     ( ( ((cache_ptr)->index)[(H5C__HASH_FCN((entry_ptr)->addr))] == \
         (entry_ptr) ) && \
       ( (entry_ptr)->ht_prev != NULL ) ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + \
        (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
     ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] < \
       (entry_ptr)->size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
}
1082 
/* Sanity checks run after removing an entry from the hash table: both of
 * the entry's hash links must have been cleared, and all global / per-ring
 * index bookkeeping must still balance.
 *
 * Fix: the original tested (entry_ptr)->ht_prev twice; the first of the
 * pair now tests ht_next, which H5C__DELETE_FROM_INDEX also sets to NULL.
 * As written, a stale ht_next pointer was never detected.
 */
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
     ( (entry_ptr) == NULL ) || \
     ( ! H5F_addr_defined((entry_ptr)->addr) ) || \
     ( (entry_ptr)->size <= 0 ) || \
     ( (entry_ptr)->ht_next != NULL ) || \
     ( (entry_ptr)->ht_prev != NULL ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + \
        (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
}
1108 
/* Sanity checks run before a hash table search: valid cache, balanced
 * clean/dirty index sizes, defined search address, and in-range hash value.
 * (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK)
 */
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( ! H5F_addr_defined(Addr) ) || \
     ( H5C__HASH_FCN(Addr) < 0 ) || \
     ( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
}
1120 
/* Sanity checks run after a successful hash table search: the found entry
 * must be properly linked in bucket k (head iff ht_prev is NULL, and both
 * neighbor back-links consistent).
 * (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK)
 */
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
     ( (cache_ptr)->index_len < 1 ) || \
     ( (entry_ptr) == NULL ) || \
     ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( (entry_ptr)->size <= 0 ) || \
     ( ((cache_ptr)->index)[k] == NULL ) || \
     ( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \
       ( (entry_ptr)->ht_prev == NULL ) ) || \
     ( ( ((cache_ptr)->index)[k] == (entry_ptr) ) && \
       ( (entry_ptr)->ht_prev != NULL ) ) || \
     ( ( (entry_ptr)->ht_prev != NULL ) && \
       ( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
     ( ( (entry_ptr)->ht_next != NULL ) && \
       ( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
}

/* Sanity checks after moving a found entry to the front of its bucket:
 * it must now be the bucket head with no predecessor.
 * (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK)
 */
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
     ( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
     ( (entry_ptr)->ht_prev != NULL ) ) { \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
}
1150 
/* Sanity checks run before an in-index entry size change: the old size
 * must be accounted for in the correct clean/dirty bucket (the
 * !(was_clean)/... conjunction fails only when neither bucket can cover
 * old_size), and all global / per-ring bookkeeping must balance.
 */
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                         entry_ptr, was_clean) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->index_len <= 0 ) || \
     ( (cache_ptr)->index_size <= 0 ) || \
     ( (new_size) <= 0 ) || \
     ( (old_size) > (cache_ptr)->index_size ) || \
     ( ( (cache_ptr)->index_len == 1 ) && \
       ( (cache_ptr)->index_size != (old_size) ) ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + \
        (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( ( !( was_clean ) || \
         ( (cache_ptr)->clean_index_size < (old_size) ) ) && \
       ( ( (was_clean) ) || \
         ( (cache_ptr)->dirty_index_size < (old_size) ) ) ) || \
     ( (entry_ptr) == NULL ) || \
     ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
     ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
}
1185 
/* Sanity checks run after an in-index entry size change: new_size must be
 * accounted for in the bucket matching the entry's current is_dirty state,
 * and all global / per-ring bookkeeping must still balance.
 */
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                          entry_ptr) \
if ( ( (cache_ptr) == NULL ) || \
     ( (cache_ptr)->index_len <= 0 ) || \
     ( (cache_ptr)->index_size <= 0 ) || \
     ( (new_size) > (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + \
        (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( ( !((entry_ptr)->is_dirty ) || \
         ( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
       ( ( ((entry_ptr)->is_dirty) ) || \
         ( (cache_ptr)->clean_index_size < (new_size) ) ) ) || \
     ( ( (cache_ptr)->index_len == 1 ) && \
       ( (cache_ptr)->index_size != (new_size) ) ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
     ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
     ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
}
1215 
/* Sanity checks run before moving an entry's size from the dirty to the
 * clean totals: the entry must already be marked clean (is_dirty FALSE)
 * but its size must still be counted in dirty_index_size.
 */
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
if ( \
    ( (cache_ptr) == NULL ) || \
    ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
    ( (cache_ptr)->index_len <= 0 ) || \
    ( (entry_ptr) == NULL ) || \
    ( (entry_ptr)->is_dirty != FALSE ) || \
    ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
    ( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \
    ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
    ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
    ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
    ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
    ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
      (cache_ptr)->index_len ) || \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
      (cache_ptr)->index_size ) || \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
      ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
       (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
}

/* Sanity checks run before moving an entry's size from the clean to the
 * dirty totals: the entry must already be marked dirty (is_dirty TRUE)
 * but its size must still be counted in clean_index_size.
 */
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
if ( \
    ( (cache_ptr) == NULL ) || \
    ( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
    ( (cache_ptr)->index_len <= 0 ) || \
    ( (entry_ptr) == NULL ) || \
    ( (entry_ptr)->is_dirty != TRUE ) || \
    ( (cache_ptr)->index_size < (entry_ptr)->size ) || \
    ( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \
    ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
    ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
    ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
    ( (entry_ptr)->ring <= H5C_RING_UNDEFINED ) || \
    ( (entry_ptr)->ring >= H5C_RING_NTYPES ) || \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] <= 0 ) || \
    ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
      (cache_ptr)->index_len ) || \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
      (cache_ptr)->index_size ) || \
    ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
      ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
       (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
}
1269 
/* Sanity checks run after moving an entry's size dirty -> clean:
 * global and per-ring clean/dirty totals must balance again.
 */
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
if ( ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
}

/* Sanity checks run after moving an entry's size clean -> dirty:
 * same balance checks as the clean variant.
 */
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
if ( ( (cache_ptr)->index_size != \
       ((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
     ( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
     ( (cache_ptr)->index_ring_len[(entry_ptr)->ring] > \
       (cache_ptr)->index_len ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] > \
       (cache_ptr)->index_size ) || \
     ( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
       ((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
        (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
    HDassert(FALSE); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
}
1301 
1302 #else /* H5C_DO_SANITY_CHECKS */
1303 
/* Sanity checking disabled (H5C_DO_SANITY_CHECKS off): every hash table
 * sanity-check macro compiles to nothing.
 */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val)
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                         entry_ptr, was_clean)
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                          entry_ptr)
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
1319 
1320 #endif /* H5C_DO_SANITY_CHECKS */
1321 
1322 
/* Insert an entry at the head of its hash table bucket and update all
 * index bookkeeping: global and per-ring length/size, the clean or dirty
 * totals (per the entry's is_dirty flag), the flush_me_last count, the
 * index doubly linked list (il_*), and the HT-insertion statistics.
 * Bracketed by the pre/post insert sanity checks.
 */
#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
{ \
    int k; \
    H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
    k = H5C__HASH_FCN((entry_ptr)->addr); \
    if(((cache_ptr)->index)[k] != NULL) { \
        (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
        (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
    } \
    ((cache_ptr)->index)[k] = (entry_ptr); \
    (cache_ptr)->index_len++; \
    (cache_ptr)->index_size += (entry_ptr)->size; \
    ((cache_ptr)->index_ring_len[entry_ptr->ring])++; \
    ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
        += (entry_ptr)->size; \
    if((entry_ptr)->is_dirty) { \
        (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
            += (entry_ptr)->size; \
    } else { \
        (cache_ptr)->clean_index_size += (entry_ptr)->size; \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
            += (entry_ptr)->size; \
    } \
    if((entry_ptr)->flush_me_last) { \
        (cache_ptr)->num_last_entries++; \
        HDassert((cache_ptr)->num_last_entries <= 2); \
    } \
    H5C__IL_DLL_APPEND((entry_ptr), (cache_ptr)->il_head, \
                       (cache_ptr)->il_tail, (cache_ptr)->il_len, \
                       (cache_ptr)->il_size, fail_val) \
    H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
    H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
}
1357 
/* Unlink an entry from its hash table bucket (NULLing both of its hash
 * links) and reverse all the index bookkeeping done by
 * H5C__INSERT_IN_INDEX.  Bracketed by the pre/post remove sanity checks.
 */
#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val) \
{ \
    int k; \
    H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
    k = H5C__HASH_FCN((entry_ptr)->addr); \
    if((entry_ptr)->ht_next) \
        (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
    if((entry_ptr)->ht_prev) \
        (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
    if(((cache_ptr)->index)[k] == (entry_ptr)) \
        ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
    (entry_ptr)->ht_next = NULL; \
    (entry_ptr)->ht_prev = NULL; \
    (cache_ptr)->index_len--; \
    (cache_ptr)->index_size -= (entry_ptr)->size; \
    ((cache_ptr)->index_ring_len[entry_ptr->ring])--; \
    ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
        -= (entry_ptr)->size; \
    if((entry_ptr)->is_dirty) { \
        (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
            -= (entry_ptr)->size; \
    } else { \
        (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
            -= (entry_ptr)->size; \
    } \
    if((entry_ptr)->flush_me_last) { \
        (cache_ptr)->num_last_entries--; \
        HDassert((cache_ptr)->num_last_entries <= 1); \
    } \
    H5C__IL_DLL_REMOVE((entry_ptr), (cache_ptr)->il_head, \
                       (cache_ptr)->il_tail, (cache_ptr)->il_len, \
                       (cache_ptr)->il_size, fail_val) \
    H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
    H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
}
1395 
/* Search the hash table for the entry at Addr, leaving the result (or
 * NULL) in entry_ptr.  On a hit, the entry is moved to the front of its
 * bucket chain (move-to-front heuristic) and the search depth is fed to
 * the HT-search statistics.
 */
#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
{ \
    int k; \
    int depth = 0; \
    H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
    k = H5C__HASH_FCN(Addr); \
    entry_ptr = ((cache_ptr)->index)[k]; \
    while(entry_ptr) { \
        if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
            H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
            if(entry_ptr != ((cache_ptr)->index)[k]) { \
                if((entry_ptr)->ht_next) \
                    (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
                HDassert((entry_ptr)->ht_prev != NULL); \
                (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
                ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
                (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
                (entry_ptr)->ht_prev = NULL; \
                ((cache_ptr)->index)[k] = (entry_ptr); \
                H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
            } \
            break; \
        } \
        (entry_ptr) = (entry_ptr)->ht_next; \
        (depth)++; \
    } \
    H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
}
1424 
/* Same hash table search (including the move-to-front on a hit) as
 * H5C__SEARCH_INDEX, but without depth tracking or the HT-search stats
 * update.  The duplication with H5C__SEARCH_INDEX is deliberate -- keep
 * the two in sync when modifying either.
 */
#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \
{ \
    int k; \
    H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
    k = H5C__HASH_FCN(Addr); \
    entry_ptr = ((cache_ptr)->index)[k]; \
    while(entry_ptr) { \
        if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
            H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
            if(entry_ptr != ((cache_ptr)->index)[k]) { \
                if((entry_ptr)->ht_next) \
                    (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
                HDassert((entry_ptr)->ht_prev != NULL); \
                (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
                ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
                (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
                (entry_ptr)->ht_prev = NULL; \
                ((cache_ptr)->index)[k] = (entry_ptr); \
                H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
            } \
            break; \
        } \
        (entry_ptr) = (entry_ptr)->ht_next; \
    } \
}
1450 
/* Move an entry's size from the dirty totals to the clean totals (global
 * and per-ring) after the entry has been marked clean.  Bracketed by the
 * corresponding sanity checks.
 */
#define H5C__UPDATE_INDEX_FOR_ENTRY_CLEAN(cache_ptr, entry_ptr) \
{ \
    H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
    (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
    ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
        -= (entry_ptr)->size; \
    (cache_ptr)->clean_index_size += (entry_ptr)->size; \
    ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
        += (entry_ptr)->size; \
    H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr); \
}

/* Mirror image of the clean macro: move an entry's size from the clean
 * totals to the dirty totals after the entry has been marked dirty.
 */
#define H5C__UPDATE_INDEX_FOR_ENTRY_DIRTY(cache_ptr, entry_ptr) \
{ \
    H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
    (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
    ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
        -= (entry_ptr)->size; \
    (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
    ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
        += (entry_ptr)->size; \
    H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr); \
}
1474 
/* Adjust all index size accounting when an in-index entry changes size:
 * old_size is removed from the bucket given by was_clean, new_size is
 * added to the bucket given by the entry's current is_dirty flag (the two
 * may differ if the resize also changed the entry's dirty state), and the
 * index-list (il_*) totals are updated.  Bracketed by the size-change
 * sanity checks.
 */
#define H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size, \
                                          entry_ptr, was_clean) \
{ \
    H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                     entry_ptr, was_clean) \
    (cache_ptr)->index_size -= (old_size); \
    (cache_ptr)->index_size += (new_size); \
    ((cache_ptr)->index_ring_size[entry_ptr->ring]) -= (old_size); \
    ((cache_ptr)->index_ring_size[entry_ptr->ring]) += (new_size); \
    if(was_clean) { \
        (cache_ptr)->clean_index_size -= (old_size); \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])-= (old_size); \
    } else { \
        (cache_ptr)->dirty_index_size -= (old_size); \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])-= (old_size); \
    } \
    if((entry_ptr)->is_dirty) { \
        (cache_ptr)->dirty_index_size += (new_size); \
        ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])+= (new_size); \
    } else { \
        (cache_ptr)->clean_index_size += (new_size); \
        ((cache_ptr)->clean_index_ring_size[entry_ptr->ring])+= (new_size); \
    } \
    H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len, \
                                    (cache_ptr)->il_size, \
                                    (old_size), (new_size)) \
    H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
                                      entry_ptr) \
}
1504 
1505 
1506 /**************************************************************************
1507  *
1508  * Skip list insertion and deletion macros:
1509  *
1510  * These used to be functions, but I converted them to macros to avoid some
1511  * function call overhead.
1512  *
1513  **************************************************************************/
1514 
1515 /*-------------------------------------------------------------------------
1516  *
1517  * Macro: H5C__INSERT_ENTRY_IN_SLIST
1518  *
1519  * Purpose: Insert the specified instance of H5C_cache_entry_t into
1520  * the skip list in the specified instance of H5C_t. Update
1521  * the associated length and size fields.
1522  *
1523  * Return: N/A
1524  *
1525  * Programmer: John Mainzer, 5/10/04
1526  *
1527  * Modifications:
1528  *
1529  * JRM -- 7/21/04
1530  * Updated function to set the in_tree flag when inserting
1531  * an entry into the tree. Also modified the function to
1532  * update the tree size and len fields instead of the similar
1533  * index fields.
1534  *
1535  * All of this is part of the modifications to support the
1536  * hash table.
1537  *
1538  * JRM -- 7/27/04
1539  * Converted the function H5C_insert_entry_in_tree() into
1540  * the macro H5C__INSERT_ENTRY_IN_TREE in the hopes of
1541  * wringing a little more speed out of the cache.
1542  *
1543  * Note that we don't bother to check if the entry is already
1544  * in the tree -- if it is, H5SL_insert() will fail.
1545  *
1546  * QAK -- 11/27/04
1547  * Switched over to using skip list routines.
1548  *
1549  * JRM -- 6/27/06
1550  * Added fail_val parameter.
1551  *
1552  * JRM -- 8/25/06
1553  * Added the H5C_DO_SANITY_CHECKS version of the macro.
1554  *
1555  * This version maintains the slist_len_increase and
1556  * slist_size_increase fields that are used in sanity
1557  * checks in the flush routines.
1558  *
1559  * All this is needed as the fractal heap needs to be
1560  * able to dirty, resize and/or move entries during the
1561  * flush.
1562  *
1563  * JRM -- 12/13/14
1564  * Added code to set cache_ptr->slist_changed to TRUE
1565  * when an entry is inserted in the slist.
1566  *
1567  * JRM -- 9/1/15
1568  * Added code to maintain the cache_ptr->slist_ring_len
1569  * and cache_ptr->slist_ring_size arrays.
1570  *
1571  *-------------------------------------------------------------------------
1572  */
1573 
/* Skip list membership test: with slist sanity checks enabled it performs
 * the (expensive) H5C_entry_in_skip_list() lookup; otherwise it is
 * compiled away to FALSE so callers' assertions become vacuous.
 */
#if H5C_DO_SLIST_SANITY_CHECKS
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
    H5C_entry_in_skip_list((cache_ptr), (entry_ptr))
#else /* H5C_DO_SLIST_SANITY_CHECKS */
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
1580 
#if H5C_DO_SANITY_CHECKS

/* Insert *entry_ptr into the cache's skip list and update the associated
 * bookkeeping: the entry's in_slist flag, the global slist_len /
 * slist_size fields, and the per-ring slist_ring_len / slist_ring_size
 * arrays.  Sets cache_ptr->slist_changed so the flush code can detect
 * that the slist was modified.  On H5SL_insert() failure, reports an
 * error and returns fail_val via HGOTO_ERROR (so this macro may only be
 * used in functions with an HGOTO_ERROR-compatible error path).
 *
 * This version additionally maintains the slist_len_increase and
 * slist_size_increase fields used by sanity checks in the flush
 * routines (needed because the fractal heap can dirty, resize, and/or
 * move entries during a flush).
 */
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val)           \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( (entry_ptr)->size > 0 );                                       \
    HDassert( H5F_addr_defined((entry_ptr)->addr) );                         \
    HDassert( !((entry_ptr)->in_slist) );                                    \
    HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) );                   \
    HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );                      \
    HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                         \
    HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=              \
              (cache_ptr)->slist_len );                                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=             \
              (cache_ptr)->slist_size );                                     \
                                                                             \
    if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
                                                                             \
    (entry_ptr)->in_slist = TRUE;                                            \
    (cache_ptr)->slist_changed = TRUE;                                       \
    (cache_ptr)->slist_len++;                                                \
    (cache_ptr)->slist_size += (entry_ptr)->size;                            \
    ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++;                      \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;  \
    (cache_ptr)->slist_len_increase++;                                       \
    (cache_ptr)->slist_size_increase += (int64_t)((entry_ptr)->size);        \
                                                                             \
    HDassert( (cache_ptr)->slist_len > 0 );                                  \
    HDassert( (cache_ptr)->slist_size > 0 );                                 \
                                                                             \
} /* H5C__INSERT_ENTRY_IN_SLIST */

#else /* H5C_DO_SANITY_CHECKS */

/* Production version: identical to the above except that the
 * slist_len_increase / slist_size_increase sanity-check fields are not
 * maintained.
 */
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val)           \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( (entry_ptr)->size > 0 );                                       \
    HDassert( H5F_addr_defined((entry_ptr)->addr) );                         \
    HDassert( !((entry_ptr)->in_slist) );                                    \
    HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) );                   \
    HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );                      \
    HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                         \
    HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=              \
              (cache_ptr)->slist_len );                                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=             \
              (cache_ptr)->slist_size );                                     \
                                                                             \
    if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
                                                                             \
    (entry_ptr)->in_slist = TRUE;                                            \
    (cache_ptr)->slist_changed = TRUE;                                       \
    (cache_ptr)->slist_len++;                                                \
    (cache_ptr)->slist_size += (entry_ptr)->size;                            \
    ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])++;                      \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (entry_ptr)->size;  \
                                                                             \
    HDassert( (cache_ptr)->slist_len > 0 );                                  \
    HDassert( (cache_ptr)->slist_size > 0 );                                 \
                                                                             \
} /* H5C__INSERT_ENTRY_IN_SLIST */

#endif /* H5C_DO_SANITY_CHECKS */
1650 
1651 
1652 /*-------------------------------------------------------------------------
1653  *
1654  * Function: H5C__REMOVE_ENTRY_FROM_SLIST
1655  *
1656  * Purpose: Remove the specified instance of H5C_cache_entry_t from the
1657  * index skip list in the specified instance of H5C_t. Update
1658  * the associated length and size fields.
1659  *
1660  * Return: N/A
1661  *
1662  * Programmer: John Mainzer, 5/10/04
1663  *
1664  *-------------------------------------------------------------------------
1665  */
1666 
/* Remove *entry_ptr from the cache's skip list and decrement the
 * associated bookkeeping: the entry's in_slist flag, the global
 * slist_len / slist_size fields, and the per-ring slist_ring_len /
 * slist_ring_size arrays.  The during_flush flag suppresses setting
 * cache_ptr->slist_changed, since the flush code modifies the slist
 * itself and tracks its own changes.
 *
 * Note: unlike H5C__INSERT_ENTRY_IN_SLIST, this macro has no fail_val
 * parameter -- on H5SL_remove() failure it reports FAIL via
 * HGOTO_ERROR, so it may only be used in functions returning herr_t.
 *
 * Fix (review): the ring-size assertions indexed the array with
 * (entry_ptr->ring) rather than (entry_ptr)->ring -- the macro argument
 * was not parenthesized before member access, which would mis-parse if
 * a non-trivial expression (e.g. *epp) were passed.  Corrected in both
 * versions below.
 */
#if H5C_DO_SANITY_CHECKS

/* This version additionally maintains the slist_len_increase and
 * slist_size_increase fields used by sanity checks in the flush
 * routines.
 */
#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)     \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
    HDassert( (entry_ptr)->in_slist );                                       \
    HDassert( (cache_ptr)->slist_ptr );                                      \
    HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );                      \
    HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                         \
    HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=              \
              (cache_ptr)->slist_len );                                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=             \
              (cache_ptr)->slist_size );                                     \
                                                                             \
    if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr)             \
         != (entry_ptr) )                                                    \
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
                                                                             \
    HDassert( (cache_ptr)->slist_len > 0 );                                  \
    if(!(during_flush))                                                      \
        (cache_ptr)->slist_changed = TRUE;                                   \
    (cache_ptr)->slist_len--;                                                \
    HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size );                \
    (cache_ptr)->slist_size -= (entry_ptr)->size;                            \
    ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--;                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] >=             \
              (entry_ptr)->size );                                           \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;  \
    (cache_ptr)->slist_len_increase--;                                       \
    (cache_ptr)->slist_size_increase -= (int64_t)((entry_ptr)->size);        \
    (entry_ptr)->in_slist = FALSE;                                           \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */

#else /* H5C_DO_SANITY_CHECKS */

/* Production version: identical to the above except that the
 * slist_len_increase / slist_size_increase sanity-check fields are not
 * maintained.
 */
#define H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)     \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->in_slist );                                       \
    HDassert( (cache_ptr)->slist_ptr );                                      \
    HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );                      \
    HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                         \
    HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=              \
              (cache_ptr)->slist_len );                                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=             \
              (cache_ptr)->slist_size );                                     \
                                                                             \
    if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr)             \
         != (entry_ptr) )                                                    \
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
                                                                             \
    HDassert( (cache_ptr)->slist_len > 0 );                                  \
    if(!(during_flush))                                                      \
        (cache_ptr)->slist_changed = TRUE;                                   \
    (cache_ptr)->slist_len--;                                                \
    HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size );                \
    (cache_ptr)->slist_size -= (entry_ptr)->size;                            \
    ((cache_ptr)->slist_ring_len[(entry_ptr)->ring])--;                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] >=             \
              (entry_ptr)->size );                                           \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (entry_ptr)->size;  \
    (entry_ptr)->in_slist = FALSE;                                           \
} /* H5C__REMOVE_ENTRY_FROM_SLIST */
#endif /* H5C_DO_SANITY_CHECKS */
1739 
1740 
1741 /*-------------------------------------------------------------------------
1742  *
1743  * Function: H5C__UPDATE_SLIST_FOR_SIZE_CHANGE
1744  *
1745  * Purpose: Update cache_ptr->slist_size for a change in the size of
1746  * an entry in the slist.
1747  *
1748  * Return: N/A
1749  *
1750  * Programmer: John Mainzer, 9/07/05
1751  *
1752  * Modifications:
1753  *
1754  * JRM -- 8/27/06
1755  * Added the H5C_DO_SANITY_CHECKS version of the macro.
1756  *
1757  * This version maintains the slist_size_increase field
1758  * that are used in sanity checks in the flush routines.
1759  *
1760  * All this is needed as the fractal heap needs to be
1761  * able to dirty, resize and/or move entries during the
1762  * flush.
1763  *
1764  * JRM -- 12/13/14
1765  * Note that we do not set cache_ptr->slist_changed to TRUE
1766  * in this case, as the structure of the slist is not
1767  * modified.
1768  *
1769  * JRM -- 9/1/15
1770  * Added code to maintain the cache_ptr->slist_ring_len
1771  * and cache_ptr->slist_ring_size arrays.
1772  *
1773  *-------------------------------------------------------------------------
1774  */
1775 
/* Update cache_ptr->slist_size (and the per-ring slist_ring_size array)
 * for a change in the size of an entry already in the slist.
 *
 * The slist_changed flag is deliberately NOT set here, as the structure
 * of the slist is not modified -- only the size bookkeeping.
 *
 * WARNING (review): this macro reads `entry_ptr`, which is NOT one of
 * its parameters -- it relies on an identifier named entry_ptr being in
 * scope at every call site.  The signature is left unchanged here so
 * existing callers keep working, but any new call site must have a
 * suitable entry_ptr in scope.  TODO: consider adding entry_ptr as an
 * explicit parameter in a coordinated change to all callers.
 *
 * Fix (review): the ring-size assertion indexed the array with
 * (entry_ptr->ring) rather than (entry_ptr)->ring; corrected in both
 * versions for macro-argument hygiene and consistency with the rest of
 * the file.
 */
#if H5C_DO_SANITY_CHECKS

/* This version additionally maintains the slist_size_increase field
 * used by sanity checks in the flush routines.
 */
#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size)     \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (old_size) > 0 );                                              \
    HDassert( (new_size) > 0 );                                              \
    HDassert( (old_size) <= (cache_ptr)->slist_size );                       \
    HDassert( (cache_ptr)->slist_len > 0 );                                  \
    HDassert( ((cache_ptr)->slist_len > 1) ||                                \
              ( (cache_ptr)->slist_size == (old_size) ) );                   \
    HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );                      \
    HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                         \
    HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=              \
              (cache_ptr)->slist_len );                                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=             \
              (cache_ptr)->slist_size );                                     \
                                                                             \
    (cache_ptr)->slist_size -= (old_size);                                   \
    (cache_ptr)->slist_size += (new_size);                                   \
                                                                             \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (old_size) ); \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size);         \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size);         \
                                                                             \
    (cache_ptr)->slist_size_increase -= (int64_t)(old_size);                 \
    (cache_ptr)->slist_size_increase += (int64_t)(new_size);                 \
                                                                             \
    HDassert( (new_size) <= (cache_ptr)->slist_size );                       \
    HDassert( ( (cache_ptr)->slist_len > 1 ) ||                              \
              ( (cache_ptr)->slist_size == (new_size) ) );                   \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */

#else /* H5C_DO_SANITY_CHECKS */

/* Production version: identical to the above except that the
 * slist_size_increase sanity-check field is not maintained.
 */
#define H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, old_size, new_size)     \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (old_size) > 0 );                                              \
    HDassert( (new_size) > 0 );                                              \
    HDassert( (old_size) <= (cache_ptr)->slist_size );                       \
    HDassert( (cache_ptr)->slist_len > 0 );                                  \
    HDassert( ((cache_ptr)->slist_len > 1) ||                                \
              ( (cache_ptr)->slist_size == (old_size) ) );                   \
    HDassert( (entry_ptr)->ring > H5C_RING_UNDEFINED );                      \
    HDassert( (entry_ptr)->ring < H5C_RING_NTYPES );                         \
    HDassert( (cache_ptr)->slist_ring_len[(entry_ptr)->ring] <=              \
              (cache_ptr)->slist_len );                                      \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] <=             \
              (cache_ptr)->slist_size );                                     \
                                                                             \
    (cache_ptr)->slist_size -= (old_size);                                   \
    (cache_ptr)->slist_size += (new_size);                                   \
                                                                             \
    HDassert( (cache_ptr)->slist_ring_size[(entry_ptr)->ring] >= (old_size) ); \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) -= (old_size);         \
    ((cache_ptr)->slist_ring_size[(entry_ptr)->ring]) += (new_size);         \
                                                                             \
    HDassert( (new_size) <= (cache_ptr)->slist_size );                       \
    HDassert( ( (cache_ptr)->slist_len > 1 ) ||                              \
              ( (cache_ptr)->slist_size == (new_size) ) );                   \
} /* H5C__UPDATE_SLIST_FOR_SIZE_CHANGE */

#endif /* H5C_DO_SANITY_CHECKS */
1842 
1843 
1844 /**************************************************************************
1845  *
1846  * Replacement policy update macros:
1847  *
1848  * These used to be functions, but I converted them to macros to avoid some
1849  * function call overhead.
1850  *
1851  **************************************************************************/
1852 
1853 /*-------------------------------------------------------------------------
1854  *
1855  * Macro: H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS
1856  *
1857  * Purpose: For efficiency, we sometimes change the order of flushes --
1858  * but doing so can confuse the replacement policy. This
1859  * macro exists to allow us to specify an entry as the
1860  * most recently touched so we can repair any such
1861  * confusion.
1862  *
1863  * At present, we only support the modified LRU policy, so
1864  * this function deals with that case unconditionally. If
1865  * we ever support other replacement policies, the macro
1866  * should switch on the current policy and act accordingly.
1867  *
1868  * Return: N/A
1869  *
1870  * Programmer: John Mainzer, 10/13/05
1871  *
1872  * Modifications:
1873  *
1874  * JRM -- 3/20/06
1875  * Modified macro to ignore pinned entries. Pinned entries
1876  * do not appear in the data structures maintained by the
1877  * replacement policy code, and thus this macro has nothing
1878  * to do if called for such an entry.
1879  *
1880  * JRM -- 3/28/07
1881  * Added sanity checks using the new is_read_only and
1882  * ro_ref_count fields of struct H5C_cache_entry_t.
1883  *
1884  *-------------------------------------------------------------------------
1885  */
1886 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Mark *entry_ptr as the most recently accessed entry in the
 * replacement-policy data structures, to repair ordering confusion
 * introduced when flushes are reordered for efficiency: the entry is
 * removed from the LRU list and re-inserted at its head, and likewise
 * moved to the head of the clean or dirty auxiliary LRU list as
 * selected by its dirty flag.  Pinned entries do not appear in the
 * replacement-policy structures, so the macro is a no-op for them.
 * fail_val is passed through to the list-maintenance macros for their
 * error paths.
 */
#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val)  \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    if ( ! ((entry_ptr)->is_pinned) ) {                                      \
                                                                             \
        /* modified LRU specific code */                                     \
                                                                             \
        /* remove the entry from the LRU list, and re-insert it at the head.\
         */                                                                  \
                                                                             \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,              \
                        (cache_ptr)->LRU_tail_ptr,                           \
                        (cache_ptr)->LRU_list_len,                           \
                        (cache_ptr)->LRU_list_size, (fail_val))              \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,             \
                         (cache_ptr)->LRU_tail_ptr,                          \
                         (cache_ptr)->LRU_list_len,                          \
                         (cache_ptr)->LRU_list_size, (fail_val))             \
                                                                             \
        /* Use the dirty flag to infer whether the entry is on the clean or \
         * dirty LRU list, and remove it.  Then insert it at the head of    \
         * the same LRU list.                                               \
         *                                                                  \
         * At least initially, all entries should be clean.  That may       \
         * change, so we may as well deal with both cases now.              \
         */                                                                  \
                                                                             \
        if ( (entry_ptr)->is_dirty ) {                                       \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr,     \
                                (cache_ptr)->dLRU_tail_ptr,                  \
                                (cache_ptr)->dLRU_list_len,                  \
                                (cache_ptr)->dLRU_list_size, (fail_val))     \
                                                                             \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr,    \
                                 (cache_ptr)->dLRU_tail_ptr,                 \
                                 (cache_ptr)->dLRU_list_len,                 \
                                 (cache_ptr)->dLRU_list_size, (fail_val))    \
        } else {                                                             \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr,     \
                                (cache_ptr)->cLRU_tail_ptr,                  \
                                (cache_ptr)->cLRU_list_len,                  \
                                (cache_ptr)->cLRU_list_size, (fail_val))     \
                                                                             \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr,    \
                                 (cache_ptr)->cLRU_tail_ptr,                 \
                                 (cache_ptr)->cLRU_list_len,                 \
                                 (cache_ptr)->cLRU_list_size, (fail_val))    \
        }                                                                    \
                                                                             \
        /* End modified LRU specific code. */                                \
    }                                                                        \
} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Version without the clean/dirty auxiliary LRU lists: only the main
 * LRU list is updated.
 */
#define H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS(cache_ptr, entry_ptr, fail_val)  \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    if ( ! ((entry_ptr)->is_pinned) ) {                                      \
                                                                             \
        /* modified LRU specific code */                                     \
                                                                             \
        /* remove the entry from the LRU list, and re-insert it at the head \
         */                                                                  \
                                                                             \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,              \
                        (cache_ptr)->LRU_tail_ptr,                           \
                        (cache_ptr)->LRU_list_len,                           \
                        (cache_ptr)->LRU_list_size, (fail_val))              \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,             \
                         (cache_ptr)->LRU_tail_ptr,                          \
                         (cache_ptr)->LRU_list_len,                          \
                         (cache_ptr)->LRU_list_size, (fail_val))             \
                                                                             \
        /* End modified LRU specific code. */                                \
    }                                                                        \
} /* H5C__FAKE_RP_FOR_MOST_RECENT_ACCESS */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
1984 
1985 
1986 /*-------------------------------------------------------------------------
1987  *
1988  * Macro: H5C__UPDATE_RP_FOR_EVICTION
1989  *
1990  * Purpose: Update the replacement policy data structures for an
1991  * eviction of the specified cache entry.
1992  *
1993  * At present, we only support the modified LRU policy, so
1994  * this function deals with that case unconditionally. If
1995  * we ever support other replacement policies, the function
1996  * should switch on the current policy and act accordingly.
1997  *
1998  * Return: Non-negative on success/Negative on failure.
1999  *
2000  * Programmer: John Mainzer, 5/10/04
2001  *
2002  * Modifications:
2003  *
2004  * JRM - 7/27/04
2005  * Converted the function H5C_update_rp_for_eviction() to the
2006  * macro H5C__UPDATE_RP_FOR_EVICTION in an effort to squeeze
2007  * a bit more performance out of the cache.
2008  *
2009  * At least for the first cut, I am leaving the comments and
2010  * white space in the macro. If they cause difficulties with
2011  * the pre-processor, I'll have to remove them.
2012  *
2013  * JRM - 7/28/04
2014  * Split macro into two versions, one supporting the clean and
2015  * dirty LRU lists, and the other not. Yet another attempt
2016  * at optimization.
2017  *
2018  * JRM - 3/20/06
2019  * Pinned entries can't be evicted, so this macro should never
2020  * be called on a pinned entry. Added assert to verify this.
2021  *
2022  * JRM -- 3/28/07
2023  * Added sanity checks for the new is_read_only and
2024  * ro_ref_count fields of struct H5C_cache_entry_t.
2025  *
2026  *-------------------------------------------------------------------------
2027  */
2028 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Update the replacement-policy data structures for the eviction of
 * *entry_ptr: unlink it from the main LRU list, and from the clean or
 * dirty auxiliary LRU list as selected by its dirty flag.  Pinned
 * entries cannot be evicted and are asserted against.  fail_val is
 * passed through to the list-maintenance macros for their error paths.
 */
#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val)          \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( !((entry_ptr)->is_pinned) );                                   \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    /* modified LRU specific code */                                         \
                                                                             \
    /* remove the entry from the LRU list. */                                \
                                                                             \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,                  \
                    (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len,    \
                    (cache_ptr)->LRU_list_size, (fail_val))                  \
                                                                             \
    /* If the entry is clean when it is evicted, it should be on the         \
     * clean LRU list, if it was dirty, it should be on the dirty LRU list. \
     * Remove it from the appropriate list according to the value of the    \
     * dirty flag.                                                          \
     */                                                                      \
                                                                             \
    if ( (entry_ptr)->is_dirty ) {                                           \
                                                                             \
        H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr,         \
                            (cache_ptr)->dLRU_tail_ptr,                      \
                            (cache_ptr)->dLRU_list_len,                      \
                            (cache_ptr)->dLRU_list_size, (fail_val))         \
    } else {                                                                 \
        H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr,         \
                            (cache_ptr)->cLRU_tail_ptr,                      \
                            (cache_ptr)->cLRU_list_len,                      \
                            (cache_ptr)->cLRU_list_size, (fail_val))         \
    }                                                                        \
                                                                             \
} /* H5C__UPDATE_RP_FOR_EVICTION */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Version without the clean/dirty auxiliary LRU lists: only the main
 * LRU list is updated.
 */
#define H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, fail_val)          \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( !((entry_ptr)->is_pinned) );                                   \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    /* modified LRU specific code */                                         \
                                                                             \
    /* remove the entry from the LRU list. */                                \
                                                                             \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,                  \
                    (cache_ptr)->LRU_tail_ptr, (cache_ptr)->LRU_list_len,    \
                    (cache_ptr)->LRU_list_size, (fail_val))                  \
                                                                             \
} /* H5C__UPDATE_RP_FOR_EVICTION */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2095 
2096 
2097 /*-------------------------------------------------------------------------
2098  *
2099  * Macro: H5C__UPDATE_RP_FOR_FLUSH
2100  *
2101  * Purpose: Update the replacement policy data structures for a flush
2102  * of the specified cache entry.
2103  *
2104  * At present, we only support the modified LRU policy, so
2105  * this function deals with that case unconditionally. If
2106  * we ever support other replacement policies, the function
2107  * should switch on the current policy and act accordingly.
2108  *
2109  * Return: N/A
2110  *
2111  * Programmer: John Mainzer, 5/6/04
2112  *
2113  * Modifications:
2114  *
2115  * JRM - 7/27/04
2116  * Converted the function H5C_update_rp_for_flush() to the
2117  * macro H5C__UPDATE_RP_FOR_FLUSH in an effort to squeeze
2118  * a bit more performance out of the cache.
2119  *
2120  * At least for the first cut, I am leaving the comments and
2121  * white space in the macro. If they cause difficulties with
2122  * pre-processor, I'll have to remove them.
2123  *
2124  * JRM - 7/28/04
2125  * Split macro into two versions, one supporting the clean and
2126  * dirty LRU lists, and the other not. Yet another attempt
2127  * at optimization.
2128  *
2129  * JRM - 3/20/06
2130  * While pinned entries can be flushed, they don't reside in
2131  * the replacement policy data structures when unprotected.
2132  * Thus I modified this macro to do nothing if the entry is
2133  * pinned.
2134  *
2135  * JRM - 3/28/07
2136  * Added sanity checks based on the new is_read_only and
2137  * ro_ref_count fields of struct H5C_cache_entry_t.
2138  *
2139  *-------------------------------------------------------------------------
2140  */
2141 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Update the replacement-policy data structures for a flush of
 * *entry_ptr: move it to the head of the main LRU list, remove it from
 * whichever auxiliary LRU list its dirty flag selects, and then insert
 * it at the head of the CLEAN auxiliary LRU list unconditionally --
 * the entry is about to be cleared or flushed, so placing a
 * still-dirty entry on the clean list is deliberate (see comment in
 * the body).  Pinned entries do not reside in the replacement-policy
 * structures while unprotected, so the macro is a no-op for them.
 * fail_val is passed through to the list-maintenance macros.
 */
#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val)             \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    if ( ! ((entry_ptr)->is_pinned) ) {                                      \
                                                                             \
        /* modified LRU specific code */                                     \
                                                                             \
        /* remove the entry from the LRU list, and re-insert it at the      \
         * head.                                                            \
         */                                                                  \
                                                                             \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,              \
                        (cache_ptr)->LRU_tail_ptr,                           \
                        (cache_ptr)->LRU_list_len,                           \
                        (cache_ptr)->LRU_list_size, (fail_val))              \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,             \
                         (cache_ptr)->LRU_tail_ptr,                          \
                         (cache_ptr)->LRU_list_len,                          \
                         (cache_ptr)->LRU_list_size, (fail_val))             \
                                                                             \
        /* since the entry is being flushed or cleared, one would think     \
         * that it must be dirty -- but that need not be the case.  Use the \
         * dirty flag to infer whether the entry is on the clean or dirty   \
         * LRU list, and remove it.  Then insert it at the head of the      \
         * clean LRU list.                                                  \
         *                                                                  \
         * The function presumes that a dirty entry will be either cleared  \
         * or flushed shortly, so it is OK if we put a dirty entry on the   \
         * clean LRU list.                                                  \
         */                                                                  \
                                                                             \
        if ( (entry_ptr)->is_dirty ) {                                       \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr,     \
                                (cache_ptr)->dLRU_tail_ptr,                  \
                                (cache_ptr)->dLRU_list_len,                  \
                                (cache_ptr)->dLRU_list_size, (fail_val))     \
        } else {                                                             \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr,     \
                                (cache_ptr)->cLRU_tail_ptr,                  \
                                (cache_ptr)->cLRU_list_len,                  \
                                (cache_ptr)->cLRU_list_size, (fail_val))     \
        }                                                                    \
                                                                             \
        H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr,        \
                             (cache_ptr)->cLRU_tail_ptr,                     \
                             (cache_ptr)->cLRU_list_len,                     \
                             (cache_ptr)->cLRU_list_size, (fail_val))        \
                                                                             \
        /* End modified LRU specific code. */                                \
    }                                                                        \
} /* H5C__UPDATE_RP_FOR_FLUSH */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Version without the clean/dirty auxiliary LRU lists: only the main
 * LRU list is updated.
 */
#define H5C__UPDATE_RP_FOR_FLUSH(cache_ptr, entry_ptr, fail_val)             \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    if ( ! ((entry_ptr)->is_pinned) ) {                                      \
                                                                             \
        /* modified LRU specific code */                                     \
                                                                             \
        /* remove the entry from the LRU list, and re-insert it at the      \
         * head.                                                            \
         */                                                                  \
                                                                             \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr,              \
                        (cache_ptr)->LRU_tail_ptr,                           \
                        (cache_ptr)->LRU_list_len,                           \
                        (cache_ptr)->LRU_list_size, (fail_val))              \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,             \
                         (cache_ptr)->LRU_tail_ptr,                          \
                         (cache_ptr)->LRU_list_len,                          \
                         (cache_ptr)->LRU_list_size, (fail_val))             \
                                                                             \
        /* End modified LRU specific code. */                                \
    }                                                                        \
} /* H5C__UPDATE_RP_FOR_FLUSH */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2239 
2240 
2241 /*-------------------------------------------------------------------------
2242  *
2243  * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND
2244  *
2245  * Purpose: Update the replacement policy data structures for an
2246  * insertion of the specified cache entry.
2247  *
2248  * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the
2249  * new entry as the LEAST recently used entry, not the
2250  * most recently used.
2251  *
2252  * For now at least, this macro should only be used in
2253  * the reconstruction of the metadata cache from a cache
2254  * image block.
2255  *
2256  * At present, we only support the modified LRU policy, so
2257  * this function deals with that case unconditionally. If
2258  * we ever support other replacement policies, the function
2259  * should switch on the current policy and act accordingly.
2260  *
2261  * Return: N/A
2262  *
2263  * Programmer: John Mainzer, 8/15/15
2264  *
2265  *-------------------------------------------------------------------------
2266  */
2267 
/* Update the replacement-policy data structures for an insertion of
 * *entry_ptr as the LEAST recently used entry: append it at the tail
 * of the main LRU list (and of the clean or dirty auxiliary LRU list,
 * as selected by its dirty flag), or prepend to the pinned entry list
 * if the entry is pinned.  Intended only for reconstruction of the
 * metadata cache from a cache image block; contrast with
 * H5C__UPDATE_RP_FOR_INSERTION, which marks the new entry most
 * recently used.  fail_val is passed through to the list-maintenance
 * macros for their error paths.
 *
 * Fix (review): the dirty-flag test read `entry_ptr->is_dirty` without
 * parenthesizing the macro argument; changed to
 * `(entry_ptr)->is_dirty` for hygiene and consistency with the rest of
 * the file.
 */
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val)     \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    if ( (entry_ptr)->is_pinned ) {                                          \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr,             \
                         (cache_ptr)->pel_tail_ptr,                          \
                         (cache_ptr)->pel_len,                               \
                         (cache_ptr)->pel_size, (fail_val))                  \
                                                                             \
    } else {                                                                 \
                                                                             \
        /* modified LRU specific code */                                     \
                                                                             \
        /* insert the entry at the tail of the LRU list. */                  \
                                                                             \
        H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,              \
                        (cache_ptr)->LRU_tail_ptr,                           \
                        (cache_ptr)->LRU_list_len,                           \
                        (cache_ptr)->LRU_list_size, (fail_val))              \
                                                                             \
        /* insert the entry at the tail of the clean or dirty LRU list as   \
         * appropriate.                                                     \
         */                                                                  \
                                                                             \
        if ( (entry_ptr)->is_dirty ) {                                       \
            H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr,     \
                                (cache_ptr)->dLRU_tail_ptr,                  \
                                (cache_ptr)->dLRU_list_len,                  \
                                (cache_ptr)->dLRU_list_size, (fail_val))     \
        } else {                                                             \
            H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr,     \
                                (cache_ptr)->cLRU_tail_ptr,                  \
                                (cache_ptr)->cLRU_list_len,                  \
                                (cache_ptr)->cLRU_list_size, (fail_val))     \
        }                                                                    \
                                                                             \
        /* End modified LRU specific code. */                                \
    }                                                                        \
}

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Version without the clean/dirty auxiliary LRU lists: only the main
 * LRU list (or the pinned entry list) is updated.
 */
#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val)     \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    if ( (entry_ptr)->is_pinned ) {                                          \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr,             \
                         (cache_ptr)->pel_tail_ptr,                          \
                         (cache_ptr)->pel_len,                               \
                         (cache_ptr)->pel_size, (fail_val))                  \
                                                                             \
    } else {                                                                 \
                                                                             \
        /* modified LRU specific code */                                     \
                                                                             \
        /* insert the entry at the tail of the LRU list. */                  \
                                                                             \
        H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,              \
                        (cache_ptr)->LRU_tail_ptr,                           \
                        (cache_ptr)->LRU_list_len,                           \
                        (cache_ptr)->LRU_list_size, (fail_val))              \
                                                                             \
        /* End modified LRU specific code. */                                \
    }                                                                        \
}

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2353 
2354 
2355 /*-------------------------------------------------------------------------
2356  *
2357  * Macro: H5C__UPDATE_RP_FOR_INSERTION
2358  *
2359  * Purpose: Update the replacement policy data structures for an
2360  * insertion of the specified cache entry.
2361  *
2362  * At present, we only support the modified LRU policy, so
2363  * this function deals with that case unconditionally. If
2364  * we ever support other replacement policies, the function
2365  * should switch on the current policy and act accordingly.
2366  *
2367  * Return: N/A
2368  *
2369  * Programmer: John Mainzer, 5/17/04
2370  *
2371  * Modifications:
2372  *
2373  * JRM - 7/27/04
2374  * Converted the function H5C_update_rp_for_insertion() to the
2375  * macro H5C__UPDATE_RP_FOR_INSERTION in an effort to squeeze
2376  * a bit more performance out of the cache.
2377  *
2378  * At least for the first cut, I am leaving the comments and
2379  * white space in the macro. If they cause difficulties with
2380  * pre-processor, I'll have to remove them.
2381  *
2382  * JRM - 7/28/04
2383  * Split macro into two versions, one supporting the clean and
2384  * dirty LRU lists, and the other not. Yet another attempt
2385  * at optimization.
2386  *
2387  * JRM - 3/10/06
2388  * This macro should never be called on a pinned entry.
2389  * Inserted an assert to verify this.
2390  *
2391  * JRM - 8/9/06
2392  * Not any more. We must now allow insertion of pinned
2393  * entries. Updated macro to support this.
2394  *
2395  * JRM - 3/28/07
2396  * Added sanity checks using the new is_read_only and
2397  * ro_ref_count fields of struct H5C_cache_entry_t.
2398  *
2399  *-------------------------------------------------------------------------
2400  */
2401 
2402 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
2403 
/* Update the replacement-policy data structures for an insertion of
 * *entry_ptr as the MOST recently used entry: prepend it at the head
 * of the main LRU list and of the clean or dirty auxiliary LRU list
 * (as selected by its dirty flag), or prepend to the pinned entry list
 * if the entry is pinned.  fail_val is passed through to the
 * list-maintenance macros for their error paths.
 *
 * Fix (review): the dirty-flag test read `entry_ptr->is_dirty` without
 * parenthesizing the macro argument; changed to
 * `(entry_ptr)->is_dirty` for hygiene and consistency with the rest of
 * the file.
 */
#define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val)         \
{                                                                            \
    HDassert( (cache_ptr) );                                                 \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC );                      \
    HDassert( (entry_ptr) );                                                 \
    HDassert( !((entry_ptr)->is_protected) );                                \
    HDassert( !((entry_ptr)->is_read_only) );                                \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 );                            \
    HDassert( (entry_ptr)->size > 0 );                                       \
                                                                             \
    if ( (entry_ptr)->is_pinned ) {                                          \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr,             \
                         (cache_ptr)->pel_tail_ptr,                          \
                         (cache_ptr)->pel_len,                               \
                         (cache_ptr)->pel_size, (fail_val))                  \
                                                                             \
    } else {                                                                 \
                                                                             \
        /* modified LRU specific code */                                     \
                                                                             \
        /* insert the entry at the head of the LRU list. */                  \
                                                                             \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr,             \
                         (cache_ptr)->LRU_tail_ptr,                          \
                         (cache_ptr)->LRU_list_len,                          \
                         (cache_ptr)->LRU_list_size, (fail_val))             \
                                                                             \
        /* insert the entry at the head of the clean or dirty LRU list as   \
         * appropriate.                                                     \
         */                                                                  \
                                                                             \
        if ( (entry_ptr)->is_dirty ) {                                       \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr,    \
                                 (cache_ptr)->dLRU_tail_ptr,                 \
                                 (cache_ptr)->dLRU_list_len,                 \
                                 (cache_ptr)->dLRU_list_size, (fail_val))    \
        } else {                                                             \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr,    \
                                 (cache_ptr)->cLRU_tail_ptr,                 \
                                 (cache_ptr)->cLRU_list_len,                 \
                                 (cache_ptr)->cLRU_list_size, (fail_val))    \
        }                                                                    \
                                                                             \
        /* End modified LRU specific code. */                                \
    }                                                                        \
}
2451 
2452 #else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2453 
2454 #define H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, fail_val) \
2455 { \
2456  HDassert( (cache_ptr) ); \
2457  HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
2458  HDassert( (entry_ptr) ); \
2459  HDassert( !((entry_ptr)->is_protected) ); \
2460  HDassert( !((entry_ptr)->is_read_only) ); \
2461  HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
2462  HDassert( (entry_ptr)->size > 0 ); \
2463  \
2464  if ( (entry_ptr)->is_pinned ) { \
2465  \
2466  H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
2467  (cache_ptr)->pel_tail_ptr, \
2468  (cache_ptr)->pel_len, \
2469  (cache_ptr)->pel_size, (fail_val)) \
2470  \
2471  } else { \
2472  \
2473  /* modified LRU specific code */ \
2474  \
2475  /* insert the entry at the head of the LRU list. */ \
2476  \
2477  H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
2478  (cache_ptr)->LRU_tail_ptr, \
2479  (cache_ptr)->LRU_list_len, \
2480  (cache_ptr)->LRU_list_size, (fail_val)) \
2481  \
2482  /* End modified LRU specific code. */ \
2483  } \
2484 }
2485 
2486 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2487 
2488 
2489 /*-------------------------------------------------------------------------
2490  *
2491  * Macro: H5C__UPDATE_RP_FOR_PROTECT
2492  *
2493  * Purpose: Update the replacement policy data structures for a
2494  * protect of the specified cache entry.
2495  *
2496  * To do this, unlink the specified entry from any data
2497  * structures used by the replacement policy, and add the
2498  * entry to the protected list.
2499  *
2500  * At present, we only support the modified LRU policy, so
2501  * this function deals with that case unconditionally. If
2502  * we ever support other replacement policies, the function
2503  * should switch on the current policy and act accordingly.
2504  *
2505  * Return: N/A
2506  *
2507  * Programmer: John Mainzer, 5/17/04
2508  *
2509  * Modifications:
2510  *
2511  * JRM - 7/27/04
2512  * Converted the function H5C_update_rp_for_protect() to the
2513  * macro H5C__UPDATE_RP_FOR_PROTECT in an effort to squeeze
2514  * a bit more performance out of the cache.
2515  *
2516  * At least for the first cut, I am leaving the comments and
2517  * white space in the macro. If they cause difficulties with
2518  * pre-processor, I'll have to remove them.
2519  *
2520  * JRM - 7/28/04
 2521  * Split macro into two versions, one supporting the clean and
2522  * dirty LRU lists, and the other not. Yet another attempt
2523  * at optimization.
2524  *
2525  * JRM - 3/17/06
 2526  * Modified macro to attempt to remove pinned entries from
2527  * the pinned entry list instead of from the data structures
2528  * maintained by the replacement policy.
2529  *
2530  * JRM - 3/28/07
2531  * Added sanity checks based on the new is_read_only and
2532  * ro_ref_count fields of struct H5C_cache_entry_t.
2533  *
2534  *-------------------------------------------------------------------------
2535  */
2536 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Update the replacement policy data structures for a protect of the
 * specified entry.
 *
 * Clean/dirty LRU variant: the entry is removed from the pinned entry
 * list (if pinned) or from the LRU list and the clean/dirty LRU list
 * matching its is_dirty flag, then appended to the protected list.
 * On failure the underlying list macros return (fail_val).
 */
#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
                        (cache_ptr)->pel_tail_ptr, \
                        (cache_ptr)->pel_len, \
                        (cache_ptr)->pel_size, (fail_val)) \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* remove the entry from the LRU list. */ \
 \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* Similarly, remove the entry from the clean or dirty LRU list \
         * as appropriate. \
         */ \
 \
        if ( (entry_ptr)->is_dirty ) { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
                                (cache_ptr)->dLRU_tail_ptr, \
                                (cache_ptr)->dLRU_list_len, \
                                (cache_ptr)->dLRU_list_size, (fail_val)) \
 \
        } else { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
                                (cache_ptr)->cLRU_tail_ptr, \
                                (cache_ptr)->cLRU_list_len, \
                                (cache_ptr)->cLRU_list_size, (fail_val)) \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
 \
    /* Regardless of the replacement policy, or whether the entry is \
     * pinned, now add the entry to the protected list. \
     */ \
 \
    H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
                    (cache_ptr)->pl_tail_ptr, \
                    (cache_ptr)->pl_len, \
                    (cache_ptr)->pl_size, (fail_val)) \
} /* H5C__UPDATE_RP_FOR_PROTECT */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Simplified variant: the clean and dirty LRU lists are not maintained,
 * so only the pinned entry / LRU / protected lists are updated.
 */
#define H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
                        (cache_ptr)->pel_tail_ptr, \
                        (cache_ptr)->pel_len, \
                        (cache_ptr)->pel_size, (fail_val)) \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* remove the entry from the LRU list. */ \
 \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
 \
    /* Regardless of the replacement policy, or whether the entry is \
     * pinned, now add the entry to the protected list. \
     */ \
 \
    H5C__DLL_APPEND((entry_ptr), (cache_ptr)->pl_head_ptr, \
                    (cache_ptr)->pl_tail_ptr, \
                    (cache_ptr)->pl_len, \
                    (cache_ptr)->pl_size, (fail_val)) \
} /* H5C__UPDATE_RP_FOR_PROTECT */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2643 
2644 
2645 /*-------------------------------------------------------------------------
2646  *
2647  * Macro: H5C__UPDATE_RP_FOR_MOVE
2648  *
2649  * Purpose: Update the replacement policy data structures for a
2650  * move of the specified cache entry.
2651  *
2652  * At present, we only support the modified LRU policy, so
2653  * this function deals with that case unconditionally. If
2654  * we ever support other replacement policies, the function
2655  * should switch on the current policy and act accordingly.
2656  *
2657  * Return: N/A
2658  *
2659  * Programmer: John Mainzer, 5/17/04
2660  *
2661  *-------------------------------------------------------------------------
2662  */
2663 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Update the replacement policy data structures for a move of the
 * specified entry.
 *
 * Pinned and protected entries live on the pinned entry / protected
 * lists, so a move requires no replacement policy action for them.
 * Otherwise the entry is moved to the head of the LRU list, removed
 * from the clean or dirty LRU list it was on (as indicated by the
 * was_dirty parameter), and prepended to the clean or dirty LRU list
 * matching its current is_dirty flag.  On failure the underlying list
 * macros return (fail_val).
 *
 * Fix: the is_protected test now fully parenthesizes the macro
 * parameter, (entry_ptr)->is_protected -- the original
 * (entry_ptr->is_protected) mis-binds if the caller passes an
 * expression such as *foo_ptr (CERT PRE01-C).  Also fixed the
 * "LUR" -> "LRU" typo in the comment below.
 */
#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr)->is_protected ) ) { \
 \
        /* modified LRU specific code */ \
 \
        /* remove the entry from the LRU list, and re-insert it at the head. \
         */ \
 \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* remove the entry from either the clean or dirty LRU list as \
         * indicated by the was_dirty parameter \
         */ \
        if ( was_dirty ) { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), \
                                (cache_ptr)->dLRU_head_ptr, \
                                (cache_ptr)->dLRU_tail_ptr, \
                                (cache_ptr)->dLRU_list_len, \
                                (cache_ptr)->dLRU_list_size, \
                                (fail_val)) \
 \
        } else { \
 \
            H5C__AUX_DLL_REMOVE((entry_ptr), \
                                (cache_ptr)->cLRU_head_ptr, \
                                (cache_ptr)->cLRU_tail_ptr, \
                                (cache_ptr)->cLRU_list_len, \
                                (cache_ptr)->cLRU_list_size, \
                                (fail_val)) \
        } \
 \
        /* insert the entry at the head of either the clean or dirty \
         * LRU list as appropriate. \
         */ \
 \
        if ( (entry_ptr)->is_dirty ) { \
 \
            H5C__AUX_DLL_PREPEND((entry_ptr), \
                                 (cache_ptr)->dLRU_head_ptr, \
                                 (cache_ptr)->dLRU_tail_ptr, \
                                 (cache_ptr)->dLRU_list_len, \
                                 (cache_ptr)->dLRU_list_size, \
                                 (fail_val)) \
 \
        } else { \
 \
            H5C__AUX_DLL_PREPEND((entry_ptr), \
                                 (cache_ptr)->cLRU_head_ptr, \
                                 (cache_ptr)->cLRU_tail_ptr, \
                                 (cache_ptr)->cLRU_list_len, \
                                 (cache_ptr)->cLRU_list_size, \
                                 (fail_val)) \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
} /* H5C__UPDATE_RP_FOR_MOVE */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Simplified variant: the clean and dirty LRU lists are not maintained,
 * so was_dirty is unused and only the main LRU list is updated.
 *
 * Fix: (entry_ptr)->is_protected is now fully parenthesized, as in
 * the variant above (CERT PRE01-C).
 */
#define H5C__UPDATE_RP_FOR_MOVE(cache_ptr, entry_ptr, was_dirty, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    if ( ! ( (entry_ptr)->is_pinned ) && ! ( (entry_ptr)->is_protected ) ) { \
 \
        /* modified LRU specific code */ \
 \
        /* remove the entry from the LRU list, and re-insert it at the head. \
         */ \
 \
        H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                        (cache_ptr)->LRU_tail_ptr, \
                        (cache_ptr)->LRU_list_len, \
                        (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
} /* H5C__UPDATE_RP_FOR_MOVE */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2774 
2775 
2776 /*-------------------------------------------------------------------------
2777  *
2778  * Macro: H5C__UPDATE_RP_FOR_SIZE_CHANGE
2779  *
2780  * Purpose: Update the replacement policy data structures for a
2781  * size change of the specified cache entry.
2782  *
2783  * To do this, determine if the entry is pinned. If it is,
2784  * update the size of the pinned entry list.
2785  *
 2786  * If it isn't pinned, the entry must be handled by the
2787  * replacement policy. Update the appropriate replacement
2788  * policy data structures.
2789  *
2790  * At present, we only support the modified LRU policy, so
2791  * this function deals with that case unconditionally. If
2792  * we ever support other replacement policies, the function
2793  * should switch on the current policy and act accordingly.
2794  *
2795  * Return: N/A
2796  *
2797  * Programmer: John Mainzer, 8/23/06
2798  *
2799  * Modifications:
2800  *
2801  * JRM -- 3/28/07
2802  * Added sanity checks based on the new is_read_only and
2803  * ro_ref_count fields of struct H5C_cache_entry_t.
2804  *
2805  *-------------------------------------------------------------------------
2806  */
2807 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Update the replacement policy data structures for a size change of
 * the specified entry: adjust the size total of every list the entry
 * is currently on (collective list if coll_access, pinned entry list
 * if pinned, otherwise the LRU list plus the clean or dirty LRU list
 * matching its is_dirty flag).  List membership is unchanged.
 *
 * Fix: the new_size sanity check now parenthesizes the macro
 * parameter, (new_size) -- every other use of new_size in this macro
 * already does so (CERT PRE01-C).
 *
 * NOTE(review): coll_access / coll_list_* are collective-I/O fields;
 * this variant appears to assume they are available (parallel build?),
 * while the variant below omits the collective list update -- confirm
 * against the H5C_t / H5C_cache_entry_t declarations.
 */
#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
    HDassert( (new_size) > 0 ); \
 \
    if ( (entry_ptr)->coll_access ) { \
 \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->coll_list_len, \
                                        (cache_ptr)->coll_list_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
    } \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
                                        (cache_ptr)->pel_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Update the size of the LRU list */ \
 \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
                                        (cache_ptr)->LRU_list_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
        /* Similarly, update the size of the clean or dirty LRU list as \
         * appropriate.  At present, the entry must be clean, but that \
         * could change. \
         */ \
 \
        if ( (entry_ptr)->is_dirty ) { \
 \
            H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->dLRU_list_len, \
                                            (cache_ptr)->dLRU_list_size, \
                                            (entry_ptr)->size, \
                                            (new_size)); \
 \
        } else { \
 \
            H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->cLRU_list_len, \
                                            (cache_ptr)->cLRU_list_size, \
                                            (entry_ptr)->size, \
                                            (new_size)); \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
 \
} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Simplified variant: the clean and dirty LRU lists are not maintained,
 * so only the pinned entry list / LRU list size totals are adjusted.
 *
 * Fix: (new_size) parenthesized in the sanity check, as above.
 */
#define H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_size) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->size > 0 ); \
    HDassert( (new_size) > 0 ); \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->pel_len, \
                                        (cache_ptr)->pel_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* Update the size of the LRU list */ \
 \
        H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->LRU_list_len, \
                                        (cache_ptr)->LRU_list_size, \
                                        (entry_ptr)->size, \
                                        (new_size)); \
 \
        /* End modified LRU specific code. */ \
    } \
 \
} /* H5C__UPDATE_RP_FOR_SIZE_CHANGE */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
2910 
2911 
2912 /*-------------------------------------------------------------------------
2913  *
2914  * Macro: H5C__UPDATE_RP_FOR_UNPIN
2915  *
2916  * Purpose: Update the replacement policy data structures for an
2917  * unpin of the specified cache entry.
2918  *
2919  * To do this, unlink the specified entry from the protected
2920  * entry list, and re-insert it in the data structures used
2921  * by the current replacement policy.
2922  *
2923  * At present, we only support the modified LRU policy, so
2924  * this function deals with that case unconditionally. If
2925  * we ever support other replacement policies, the macro
2926  * should switch on the current policy and act accordingly.
2927  *
2928  * Return: N/A
2929  *
2930  * Programmer: John Mainzer, 3/22/06
2931  *
2932  * Modifications:
2933  *
2934  * JRM -- 3/28/07
2935  * Added sanity checks based on the new is_read_only and
2936  * ro_ref_count fields of struct H5C_cache_entry_t.
2937  *
2938  *-------------------------------------------------------------------------
2939  */
2940 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Update the replacement policy data structures for an unpin of the
 * specified (pinned, unprotected) entry.
 *
 * Clean/dirty LRU variant: remove the entry from the pinned entry
 * list, then prepend it to the LRU list and to the clean or dirty LRU
 * list matching its is_dirty flag.  On failure the underlying list
 * macros return (fail_val).
 */
#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->is_pinned); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Regardless of the replacement policy, remove the entry from the \
     * pinned entry list. \
     */ \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
                    (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
                    (cache_ptr)->pel_size, (fail_val)) \
 \
    /* modified LRU specific code */ \
 \
    /* insert the entry at the head of the LRU list. */ \
 \
    H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                     (cache_ptr)->LRU_tail_ptr, \
                     (cache_ptr)->LRU_list_len, \
                     (cache_ptr)->LRU_list_size, (fail_val)) \
 \
    /* Similarly, insert the entry at the head of either the clean \
     * or dirty LRU list as appropriate. \
     */ \
 \
    if ( (entry_ptr)->is_dirty ) { \
 \
        H5C__AUX_DLL_PREPEND((entry_ptr), \
                             (cache_ptr)->dLRU_head_ptr, \
                             (cache_ptr)->dLRU_tail_ptr, \
                             (cache_ptr)->dLRU_list_len, \
                             (cache_ptr)->dLRU_list_size, \
                             (fail_val)) \
 \
    } else { \
 \
        H5C__AUX_DLL_PREPEND((entry_ptr), \
                             (cache_ptr)->cLRU_head_ptr, \
                             (cache_ptr)->cLRU_tail_ptr, \
                             (cache_ptr)->cLRU_list_len, \
                             (cache_ptr)->cLRU_list_size, \
                             (fail_val)) \
    } \
 \
    /* End modified LRU specific code. */ \
 \
} /* H5C__UPDATE_RP_FOR_UNPIN */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Simplified variant: the clean and dirty LRU lists are not maintained,
 * so the entry only moves from the pinned entry list to the LRU list.
 */
#define H5C__UPDATE_RP_FOR_UNPIN(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( !((entry_ptr)->is_protected) ); \
    HDassert( !((entry_ptr)->is_read_only) ); \
    HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
    HDassert( (entry_ptr)->is_pinned); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Regardless of the replacement policy, remove the entry from the \
     * pinned entry list. \
     */ \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
                    (cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
                    (cache_ptr)->pel_size, (fail_val)) \
 \
    /* modified LRU specific code */ \
 \
    /* insert the entry at the head of the LRU list. */ \
 \
    H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                     (cache_ptr)->LRU_tail_ptr, \
                     (cache_ptr)->LRU_list_len, \
                     (cache_ptr)->LRU_list_size, (fail_val)) \
 \
    /* End modified LRU specific code. */ \
 \
} /* H5C__UPDATE_RP_FOR_UNPIN */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3031 
3032 
3033 /*-------------------------------------------------------------------------
3034  *
3035  * Macro: H5C__UPDATE_RP_FOR_UNPROTECT
3036  *
3037  * Purpose: Update the replacement policy data structures for an
3038  * unprotect of the specified cache entry.
3039  *
3040  * To do this, unlink the specified entry from the protected
3041  * list, and re-insert it in the data structures used by the
3042  * current replacement policy.
3043  *
3044  * At present, we only support the modified LRU policy, so
3045  * this function deals with that case unconditionally. If
3046  * we ever support other replacement policies, the function
3047  * should switch on the current policy and act accordingly.
3048  *
3049  * Return: N/A
3050  *
3051  * Programmer: John Mainzer, 5/19/04
3052  *
3053  * Modifications:
3054  *
3055  * JRM - 7/27/04
3056  * Converted the function H5C_update_rp_for_unprotect() to
3057  * the macro H5C__UPDATE_RP_FOR_UNPROTECT in an effort to
3058  * squeeze a bit more performance out of the cache.
3059  *
3060  * At least for the first cut, I am leaving the comments and
3061  * white space in the macro. If they cause difficulties with
3062  * pre-processor, I'll have to remove them.
3063  *
3064  * JRM - 7/28/04
 3065  * Split macro into two versions, one supporting the clean and
3066  * dirty LRU lists, and the other not. Yet another attempt
3067  * at optimization.
3068  *
3069  * JRM - 3/17/06
3070  * Modified macro to put pinned entries on the pinned entry
3071  * list instead of inserting them in the data structures
3072  * maintained by the replacement policy.
3073  *
3074  *-------------------------------------------------------------------------
3075  */
3076 
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS

/* Update the replacement policy data structures for an unprotect of
 * the specified entry.
 *
 * Clean/dirty LRU variant: remove the entry from the protected list,
 * then prepend it to the pinned entry list (if pinned) or to the LRU
 * list and the clean or dirty LRU list matching its is_dirty flag.
 * On failure the underlying list macros return (fail_val).
 */
#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( (entry_ptr)->is_protected); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Regardless of the replacement policy, remove the entry from the \
     * protected list. \
     */ \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \
                    (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \
                    (cache_ptr)->pl_size, (fail_val)) \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
                         (cache_ptr)->pel_tail_ptr, \
                         (cache_ptr)->pel_len, \
                         (cache_ptr)->pel_size, (fail_val)) \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* insert the entry at the head of the LRU list. */ \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* Similarly, insert the entry at the head of either the clean or \
         * dirty LRU list as appropriate. \
         */ \
 \
        if ( (entry_ptr)->is_dirty ) { \
 \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
                                 (cache_ptr)->dLRU_tail_ptr, \
                                 (cache_ptr)->dLRU_list_len, \
                                 (cache_ptr)->dLRU_list_size, (fail_val)) \
 \
        } else { \
 \
            H5C__AUX_DLL_PREPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
                                 (cache_ptr)->cLRU_tail_ptr, \
                                 (cache_ptr)->cLRU_list_len, \
                                 (cache_ptr)->cLRU_list_size, (fail_val)) \
        } \
 \
        /* End modified LRU specific code. */ \
    } \
 \
} /* H5C__UPDATE_RP_FOR_UNPROTECT */

#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */

/* Simplified variant: the clean and dirty LRU lists are not maintained,
 * so the entry only moves to the pinned entry list or the LRU list.
 */
#define H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
    HDassert( (entry_ptr)->is_protected); \
    HDassert( (entry_ptr)->size > 0 ); \
 \
    /* Regardless of the replacement policy, remove the entry from the \
     * protected list. \
     */ \
    H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pl_head_ptr, \
                    (cache_ptr)->pl_tail_ptr, (cache_ptr)->pl_len, \
                    (cache_ptr)->pl_size, (fail_val)) \
 \
    if ( (entry_ptr)->is_pinned ) { \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
                         (cache_ptr)->pel_tail_ptr, \
                         (cache_ptr)->pel_len, \
                         (cache_ptr)->pel_size, (fail_val)) \
 \
    } else { \
 \
        /* modified LRU specific code */ \
 \
        /* insert the entry at the head of the LRU list. */ \
 \
        H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
                         (cache_ptr)->LRU_tail_ptr, \
                         (cache_ptr)->LRU_list_len, \
                         (cache_ptr)->LRU_list_size, (fail_val)) \
 \
        /* End modified LRU specific code. */ \
    } \
} /* H5C__UPDATE_RP_FOR_UNPROTECT */

#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
3176 
3177 #ifdef H5_HAVE_PARALLEL
3178 
3179 #if H5C_DO_SANITY_CHECKS
3180 
/* Sanity check run before removing an entry from the collective
 * entries list: verifies non-NULL head/tail/entry, positive length,
 * a size total consistent with the entry's size, head/tail linkage,
 * and (for a one-entry list) that head, tail, and entry coincide.
 * On failure, asserts and returns (fv) via HGOTO_ERROR.
 */
#define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (hd_ptr) == NULL ) || \
     ( (tail_ptr) == NULL ) || \
     ( (entry_ptr) == NULL ) || \
     ( (len) <= 0 ) || \
     ( (Size) < (entry_ptr)->size ) || \
     ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
     ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
     ( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
     ( ( (len) == 1 ) && \
       ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
             ( (entry_ptr)->coll_next == NULL ) && \
             ( (entry_ptr)->coll_prev == NULL ) && \
             ( (Size) == (entry_ptr)->size ) \
           ) \
       ) \
     ) \
   ) { \
    HDassert(0 && "coll DLL pre remove SC failed"); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \
}
3202 
/* General sanity check on the collective entries list: head and tail
 * must both be NULL or both non-NULL, length and size must be
 * non-negative, a one-entry list must have head == tail with a
 * matching size, and a non-empty list must have NULL coll_prev at the
 * head and NULL coll_next at the tail.  On failure, asserts and
 * returns (fv) via HGOTO_ERROR.
 */
#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
       ( (head_ptr) != (tail_ptr) ) \
     ) || \
     ( (len) < 0 ) || \
     ( (Size) < 0 ) || \
     ( ( (len) == 1 ) && \
       ( ( (head_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
         ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
       ) \
     ) || \
     ( ( (len) >= 1 ) && \
       ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
       ) \
     ) \
   ) { \
    HDassert(0 && "COLL DLL sanity check failed"); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed") \
}
3223 
/* Sanity check run before inserting an entry into the collective
 * entries list: the entry must be non-NULL and not already linked
 * (coll_next / coll_prev both NULL), and the list itself must pass
 * the same structural checks as H5C__COLL_DLL_SC.  On failure,
 * asserts and returns (fv) via HGOTO_ERROR.
 */
#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (entry_ptr) == NULL ) || \
     ( (entry_ptr)->coll_next != NULL ) || \
     ( (entry_ptr)->coll_prev != NULL ) || \
     ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
       ( (hd_ptr) != (tail_ptr) ) \
     ) || \
     ( ( (len) == 1 ) && \
       ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
         ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
       ) \
     ) || \
     ( ( (len) >= 1 ) && \
       ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \
         ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
       ) \
     ) \
   ) { \
    HDassert(0 && "COLL DLL pre insert SC failed"); \
    HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \
}
3245 
3246 #else /* H5C_DO_SANITY_CHECKS */
3247 
3248 #define H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
3249 #define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv)
3250 #define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
3251 
3252 #endif /* H5C_DO_SANITY_CHECKS */
3253 
3254 
/* Append the entry to the tail of the collective entries list and
 * update the list length and size totals.  Runs the pre-insert sanity
 * check first (no-op unless H5C_DO_SANITY_CHECKS), which returns
 * (fail_val) on failure.
 *
 * Fix: the size update now parenthesizes the macro parameter,
 * (entry_ptr)->size -- the original entry_ptr->size mis-binds if the
 * caller passes an expression such as *foo_ptr (CERT PRE01-C), and is
 * inconsistent with the sanity-check macros above.
 */
#define H5C__COLL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val) \
{ \
    H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
                                fail_val) \
    if ( (head_ptr) == NULL ) \
    { \
        (head_ptr) = (entry_ptr); \
        (tail_ptr) = (entry_ptr); \
    } \
    else \
    { \
        (tail_ptr)->coll_next = (entry_ptr); \
        (entry_ptr)->coll_prev = (tail_ptr); \
        (tail_ptr) = (entry_ptr); \
    } \
    (len)++; \
    (Size) += (entry_ptr)->size; \
} /* H5C__COLL_DLL_APPEND() */
3273 
/* Prepend the entry to the head of the collective entries list and
 * update the list length and size totals.  Runs the pre-insert sanity
 * check first (no-op unless H5C_DO_SANITY_CHECKS), which returns
 * (fv) on failure.
 *
 * Fix: the size update now parenthesizes the macro parameter,
 * (entry_ptr)->size (CERT PRE01-C), consistent with the sanity-check
 * macros above.
 */
#define H5C__COLL_DLL_PREPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
{ \
    H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)\
    if ( (head_ptr) == NULL ) \
    { \
        (head_ptr) = (entry_ptr); \
        (tail_ptr) = (entry_ptr); \
    } \
    else \
    { \
        (head_ptr)->coll_prev = (entry_ptr); \
        (entry_ptr)->coll_next = (head_ptr); \
        (head_ptr) = (entry_ptr); \
    } \
    (len)++; \
    (Size) += (entry_ptr)->size; \
} /* H5C__COLL_DLL_PREPEND() */
3291 
/* Unlink the entry from the collective entries list, patch up the
 * head/tail pointers and the neighbours' links, NULL the entry's own
 * links, and update the list length and size totals.  Runs the
 * pre-remove sanity check first (no-op unless H5C_DO_SANITY_CHECKS),
 * which returns (fv) on failure.
 *
 * Fix: the entry's link resets and the size update now parenthesize
 * the macro parameter -- (entry_ptr)->coll_next, (entry_ptr)->coll_prev
 * and (entry_ptr)->size; the original bare entry_ptr-> accesses
 * mis-bind if the caller passes an expression such as *foo_ptr
 * (CERT PRE01-C) and were inconsistent with the rest of this macro.
 */
#define H5C__COLL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
{ \
    H5C__COLL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv)\
    { \
        if ( (head_ptr) == (entry_ptr) ) \
        { \
            (head_ptr) = (entry_ptr)->coll_next; \
            if ( (head_ptr) != NULL ) \
                (head_ptr)->coll_prev = NULL; \
        } \
        else \
        { \
            (entry_ptr)->coll_prev->coll_next = (entry_ptr)->coll_next; \
        } \
        if ( (tail_ptr) == (entry_ptr) ) \
        { \
            (tail_ptr) = (entry_ptr)->coll_prev; \
            if ( (tail_ptr) != NULL ) \
                (tail_ptr)->coll_next = NULL; \
        } \
        else \
            (entry_ptr)->coll_next->coll_prev = (entry_ptr)->coll_prev; \
        (entry_ptr)->coll_next = NULL; \
        (entry_ptr)->coll_prev = NULL; \
        (len)--; \
        (Size) -= (entry_ptr)->size; \
    } \
} /* H5C__COLL_DLL_REMOVE() */
3320 
3321 
3322 /*-------------------------------------------------------------------------
3323  *
3324  * Macro: H5C__INSERT_IN_COLL_LIST
3325  *
3326  * Purpose: Insert entry into collective entries list
3327  *
3328  * Return: N/A
3329  *
3330  * Programmer: Mohamad Chaarawi
3331  *
3332  *-------------------------------------------------------------------------
3333  */
3334 
/* Insert the entry at the head of the cache's collective entries
 * list, updating coll_list_len / coll_list_size.  On failure the
 * underlying list macro returns (fail_val).
 */
#define H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
 \
    /* insert the entry at the head of the list. */ \
 \
    H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \
                          (cache_ptr)->coll_tail_ptr, \
                          (cache_ptr)->coll_list_len, \
                          (cache_ptr)->coll_list_size, \
                          (fail_val)) \
 \
} /* H5C__INSERT_IN_COLL_LIST */
3350 
3351 
3352 /*-------------------------------------------------------------------------
3353  *
3354  * Macro: H5C__REMOVE_FROM_COLL_LIST
3355  *
3356  * Purpose: Remove entry from collective entries list
3357  *
3358  * Return: N/A
3359  *
3360  * Programmer: Mohamad Chaarawi
3361  *
3362  *-------------------------------------------------------------------------
3363  */
3364 
/* Remove the entry from the cache's collective entries list, updating
 * coll_list_len / coll_list_size.  On failure the underlying list
 * macro returns (fail_val).
 */
#define H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, fail_val) \
{ \
    HDassert( (cache_ptr) ); \
    HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
    HDassert( (entry_ptr) ); \
 \
    /* remove the entry from the list. */ \
 \
    H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr, \
                         (cache_ptr)->coll_tail_ptr, \
                         (cache_ptr)->coll_list_len, \
                         (cache_ptr)->coll_list_size, \
                         (fail_val)) \
 \
} /* H5C__REMOVE_FROM_COLL_LIST */
3380 
3381 
3382 /*-------------------------------------------------------------------------
3383  *
3384  * Macro: H5C__MOVE_TO_TOP_IN_COLL_LIST
3385  *
3386  * Purpose: Update entry position in collective entries list
3387  *
3388  * Return: N/A
3389  *
3390  * Programmer: Mohamad Chaarawi
3391  *
3392  *-------------------------------------------------------------------------
3393  */
3394 
#define H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, fail_val)         \
{                                                                             \
    /* Sanity checks on the cache and the entry to be moved */                \
    HDassert((cache_ptr));                                                    \
    HDassert((cache_ptr)->magic == H5C__H5C_T_MAGIC);                         \
    HDassert((entry_ptr));                                                    \
                                                                              \
    /* Unlink the entry from its current position in the list... */           \
    H5C__COLL_DLL_REMOVE((entry_ptr), (cache_ptr)->coll_head_ptr,             \
                         (cache_ptr)->coll_tail_ptr,                          \
                         (cache_ptr)->coll_list_len,                          \
                         (cache_ptr)->coll_list_size,                         \
                         (fail_val))                                          \
                                                                              \
    /* ...and re-link it at the head */                                       \
    H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr,            \
                          (cache_ptr)->coll_tail_ptr,                         \
                          (cache_ptr)->coll_list_len,                         \
                          (cache_ptr)->coll_list_size,                        \
                          (fail_val))                                         \
} /* H5C__MOVE_TO_TOP_IN_COLL_LIST */
3415 #endif /* H5_HAVE_PARALLEL */
3416 
3417 
3418 /****************************/
3419 /* Package Private Typedefs */
3420 /****************************/
3421 
3422 /****************************************************************************
3423  *
3424  * structure H5C_tag_info_t
3425  *
3426  * Structure about each set of tagged entries for an object in the file.
3427  *
3428  * Each H5C_tag_info_t struct corresponds to a particular object in the file.
3429  *
3430  * Each H5C_cache_entry struct in the linked list of entries for this tag
3431  * also contains a pointer back to the H5C_tag_info_t struct for the
3432  * overall object.
3433  *
3434  *
3435  * The fields of this structure are discussed individually below:
3436  *
3437  * tag: Address (i.e. "tag") of the object header for all the entries
3438  * corresponding to parts of that object.
3439  *
3440  * head: Head of doubly-linked list of all entries belonging to the tag.
3441  *
3442  * entry_cnt: Number of entries on linked list of entries for this tag.
3443  *
3444  * corked: Boolean flag indicating whether entries for this object can be
3445  * evicted.
3446  *
3447  ****************************************************************************/
3448 typedef struct H5C_tag_info_t {
3449  haddr_t tag; /* Tag (address) of the entries (must be first, for skiplist) */
3450  H5C_cache_entry_t *head; /* Head of the list of entries for this tag */
3451  size_t entry_cnt; /* Number of entries on list */
3452  hbool_t corked; /* Whether this object is corked */
3453 } H5C_tag_info_t;
3454 
3455 
3456 /****************************************************************************
3457  *
3458  * structure H5C_t
3459  *
3460  * Catchall structure for all variables specific to an instance of the cache.
3461  *
3462  * While the individual fields of the structure are discussed below, the
3463  * following overview may be helpful.
3464  *
3465  * Entries in the cache are stored in an instance of H5TB_TREE, indexed on
3466  * the entry's disk address. While the H5TB_TREE is less efficient than
3467  * hash table, it keeps the entries in address sorted order. As flushes
3468  * in parallel mode are more efficient if they are issued in increasing
3469  * address order, this is a significant benefit. Also the H5TB_TREE code
3470  * was readily available, which reduced development time.
3471  *
3472  * While the cache was designed with multiple replacement policies in mind,
3473  * at present only a modified form of LRU is supported.
3474  *
3475  * JRM - 4/26/04
3476  *
3477  * Profiling has indicated that searches in the instance of H5TB_TREE are
3478  * too expensive. To deal with this issue, I have augmented the cache
3479  * with a hash table in which all entries will be stored. Given the
3480  * advantages of flushing entries in increasing address order, the TBBT
3481  * is retained, but only dirty entries are stored in it. At least for
3482  * now, we will leave entries in the TBBT after they are flushed.
3483  *
3484  * Note that index_size and index_len now refer to the total size of
3485  * and number of entries in the hash table.
3486  *
3487  * JRM - 7/19/04
3488  *
3489  * The TBBT has since been replaced with a skip list. This change
3490  * greatly predates this note.
3491  *
3492  * JRM - 9/26/05
3493  *
3494  * magic: Unsigned 32 bit integer always set to H5C__H5C_T_MAGIC.
3495  * This field is used to validate pointers to instances of
3496  * H5C_t.
3497  *
3498  * flush_in_progress: Boolean flag indicating whether a flush is in
3499  * progress.
3500  *
3501  * log_info: Information used by the MDC logging functionality.
3502  * Described in H5Clog.h.
3503  *
3504  * aux_ptr: Pointer to void used to allow wrapper code to associate
3505  * its data with an instance of H5C_t. The H5C cache code
3506  * sets this field to NULL, and otherwise leaves it alone.
3507  *
3508  * max_type_id: Integer field containing the maximum type id number assigned
3509  * to a type of entry in the cache. All type ids from 0 to
3510  * max_type_id inclusive must be defined. The names of the
3511  * types are stored in the type_name_table discussed below, and
3512  * indexed by the ids.
3513  *
3514  * class_table_ptr: Pointer to an array of H5C_class_t of length
3515  * max_type_id + 1. Entry classes for the cache.
3516  *
3517  * max_cache_size: Nominal maximum number of bytes that may be stored in the
3518  * cache. This value should be viewed as a soft limit, as the
3519  * cache can exceed this value under the following circumstances:
3520  *
3521  * a) All entries in the cache are protected, and the cache is
3522  * asked to insert a new entry. In this case the new entry
3523  * will be created. If this causes the cache to exceed
3524  * max_cache_size, it will do so. The cache will attempt
3525  * to reduce its size as entries are unprotected.
3526  *
3527  * b) When running in parallel mode, the cache may not be
3528  * permitted to flush a dirty entry in response to a read.
3529  * If there are no clean entries available to evict, the
3530  * cache will exceed its maximum size. Again the cache
3531  * will attempt to reduce its size to the max_cache_size
3532  * limit on the next cache write.
3533  *
3534  * c) When an entry increases in size, the cache may exceed
3535  * the max_cache_size limit until the next time the cache
3536  * attempts to load or insert an entry.
3537  *
3538  * d) When the evictions_enabled field is false (see below),
3539  * the cache size will increase without limit until the
3540  * field is set to true.
3541  *
3542  * min_clean_size: Nominal minimum number of clean bytes in the cache.
3543  * The cache attempts to maintain this number of bytes of
3544  * clean data so as to avoid case b) above. Again, this is
3545  * a soft limit.
3546  *
3547  * close_warning_received: Boolean flag indicating that a file closing
3548  * warning has been received.
3549  *
3550  *
3551  * In addition to the call back functions required for each entry, the
3552  * cache requires the following call back functions for this instance of
3553  * the cache as a whole:
3554  *
3555  * check_write_permitted: In certain applications, the cache may not
3556  * be allowed to write to disk at certain time. If specified,
3557  * the check_write_permitted function is used to determine if
3558  * a write is permissible at any given point in time.
3559  *
3560  * If no such function is specified (i.e. this field is NULL),
3561  * the cache uses the following write_permitted field to
3562  * determine whether writes are permitted.
3563  *
3564  * write_permitted: If check_write_permitted is NULL, this boolean flag
3565  * indicates whether writes are permitted.
3566  *
3567  * log_flush: If provided, this function is called whenever a dirty
3568  * entry is flushed to disk.
3569  *
3570  *
3571  * In cases where memory is plentiful, and performance is an issue, it may
3572  * be useful to disable all cache evictions, and thereby postpone metadata
3573  * writes. The following field is used to implement this.
3574  *
3575  * evictions_enabled: Boolean flag that is initialized to TRUE. When
3576  * this flag is set to FALSE, the metadata cache will not
3577  * attempt to evict entries to make space for newly protected
 3578  * entries, and instead the cache will grow without limit.
3579  *
3580  * Needless to say, this feature must be used with care.
3581  *
3582  *
3583  * The cache requires an index to facilitate searching for entries. The
3584  * following fields support that index.
3585  *
3586  * Addendum: JRM -- 10/14/15
3587  *
3588  * We sometimes need to visit all entries in the cache. In the past, this
3589  * was done by scanning the hash table. However, this is expensive, and
3590  * we have come to scan the hash table often enough that it has become a
3591  * performance issue. To repair this, I have added code to maintain a
3592  * list of all entries in the index -- call this list the index list.
3593  *
3594  * The index list is maintained by the same macros that maintain the
3595  * index, and must have the same length and size as the index proper.
3596  *
3597  * index_len: Number of entries currently in the hash table used to index
3598  * the cache.
3599  *
3600  * index_size: Number of bytes of cache entries currently stored in the
3601  * hash table used to index the cache.
3602  *
3603  * This value should not be mistaken for footprint of the
3604  * cache in memory. The average cache entry is small, and
3605  * the cache has a considerable overhead. Multiplying the
3606  * index_size by three should yield a conservative estimate
3607  * of the cache's memory footprint.
3608  *
3609  * index_ring_len: Array of integer of length H5C_RING_NTYPES used to
3610  * maintain a count of entries in the index by ring. Note
3611  * that the sum of all the cells in this array must equal
3612  * the value stored in index_len above.
3613  *
3614  * index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
3615  * maintain the sum of the sizes of all entries in the index
3616  * by ring. Note that the sum of all cells in this array must
3617  * equal the value stored in index_size above.
3618  *
3619  * clean_index_size: Number of bytes of clean entries currently stored in
3620  * the hash table. Note that the index_size field (above)
3621  * is also the sum of the sizes of all entries in the cache.
3622  * Thus we should have the invariant that clean_index_size +
3623  * dirty_index_size == index_size.
3624  *
3625  * WARNING:
3626  *
3627  * The value of the clean_index_size must not be mistaken
3628  * for the current clean size of the cache. Rather, the
3629  * clean size of the cache is the current value of
3630  * clean_index_size plus the amount of empty space (if any)
3631  * in the cache.
3632  *
3633  * clean_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
3634  * maintain the sum of the sizes of all clean entries in the
3635  * index by ring. Note that the sum of all cells in this array
3636  * must equal the value stored in clean_index_size above.
3637  *
3638  * dirty_index_size: Number of bytes of dirty entries currently stored in
3639  * the hash table. Note that the index_size field (above)
3640  * is also the sum of the sizes of all entries in the cache.
3641  * Thus we should have the invariant that clean_index_size +
3642  * dirty_index_size == index_size.
3643  *
3644  * dirty_index_ring_size: Array of size_t of length H5C_RING_NTYPES used to
3645  * maintain the sum of the sizes of all dirty entries in the
3646  * index by ring. Note that the sum of all cells in this array
3647  * must equal the value stored in dirty_index_size above.
3648  *
3649  * index: Array of pointer to H5C_cache_entry_t of size
3650  * H5C__HASH_TABLE_LEN. At present, this value is a power
3651  * of two, not the usual prime number.
3652  *
3653  * I hope that the variable size of cache elements, the large
3654  * hash table size, and the way in which HDF5 allocates space
3655  * will combine to avoid problems with periodicity. If so, we
3656  * can use a trivial hash function (a bit-and and a 3 bit left
3657  * shift) with some small savings.
3658  *
3659  * If not, it will become evident in the statistics. Changing
3660  * to the usual prime number length hash table will require
3661  * changing the H5C__HASH_FCN macro and the deletion of the
3662  * H5C__HASH_MASK #define. No other changes should be required.
3663  *
3664  * il_len: Number of entries on the index list.
3665  *
3666  * This must always be equal to index_len. As such, this
3667  * field is redundant. However, the existing linked list
3668  * management macros expect to maintain a length field, so
3669  * this field exists primarily to avoid adding complexity to
3670  * these macros.
3671  *
3672  * il_size: Number of bytes of cache entries currently stored in the
3673  * index list.
3674  *
3675  * This must always be equal to index_size. As such, this
3676  * field is redundant. However, the existing linked list
3677  * management macros expect to maintain a size field, so
3678  * this field exists primarily to avoid adding complexity to
3679  * these macros.
3680  *
3681  * il_head: Pointer to the head of the doubly linked list of entries in
3682  * the index list. Note that cache entries on this list are
3683  * linked by their il_next and il_prev fields.
3684  *
3685  * This field is NULL if the index is empty.
3686  *
3687  * il_tail: Pointer to the tail of the doubly linked list of entries in
3688  * the index list. Note that cache entries on this list are
3689  * linked by their il_next and il_prev fields.
3690  *
3691  * This field is NULL if the index is empty.
3692  *
3693  *
3694  * With the addition of the take ownership flag, it is possible that
3695  * an entry may be removed from the cache as the result of the flush of
3696  * a second entry. In general, this causes little trouble, but it is
3697  * possible that the entry removed may be the next entry in the scan of
3698  * a list. In this case, we must be able to detect the fact that the
3699  * entry has been removed, so that the scan doesn't attempt to proceed with
3700  * an entry that is no longer in the cache.
3701  *
3702  * The following fields are maintained to facilitate this.
3703  *
3704  * entries_removed_counter: Counter that is incremented each time an
3705  * entry is removed from the cache by any means (eviction,
3706  * expungement, or take ownership at this point in time).
3707  * Functions that perform scans on lists may set this field
3708  * to zero prior to calling H5C__flush_single_entry().
3709  * Unexpected changes to the counter indicate that an entry
3710  * was removed from the cache as a side effect of the flush.
3711  *
3712  * last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t
3713  * which contained the last entry to be removed from the cache,
3714  * or NULL if there either is no such entry, or if a function
3715  * performing a scan of a list has set this field to NULL prior
3716  * to calling H5C__flush_single_entry().
3717  *
3718  * WARNING!!! This field must NEVER be dereferenced. It is
3719  * maintained to allow functions that perform scans of lists
3720  * to compare this pointer with their pointers to next, thus
3721  * allowing them to avoid unnecessary restarts of scans if the
3722  * pointers don't match, and if entries_removed_counter is
3723  * one.
3724  *
3725  * entry_watched_for_removal: Pointer to an instance of H5C_cache_entry_t
3726  * which contains the 'next' entry for an iteration. Removing
3727  * this entry must trigger a rescan of the iteration, so each
3728  * entry removed from the cache is compared against this pointer
3729  * and the pointer is reset to NULL if the watched entry is removed.
3730  * (This functions similarly to a "dead man's switch")
3731  *
3732  *
3733  * When we flush the cache, we need to write entries out in increasing
3734  * address order. An instance of a skip list is used to store dirty entries in
3735  * sorted order. Whether it is cheaper to sort the dirty entries as needed,
3736  * or to maintain the list is an open question. At a guess, it depends
3737  * on how frequently the cache is flushed. We will see how it goes.
3738  *
3739  * For now at least, I will not remove dirty entries from the list as they
3740  * are flushed. (this has been changed -- dirty entries are now removed from
3741  * the skip list as they are flushed. JRM - 10/25/05)
3742  *
3743  * slist_changed: Boolean flag used to indicate whether the contents of
3744  * the slist has changed since the last time this flag was
3745  * reset. This is used in the cache flush code to detect
3746  * conditions in which pre-serialize or serialize callbacks
3747  * have modified the slist -- which obliges us to restart
3748  * the scan of the slist from the beginning.
3749  *
3750  * slist_len: Number of entries currently in the skip list
3751  * used to maintain a sorted list of dirty entries in the
3752  * cache.
3753  *
3754  * slist_size: Number of bytes of cache entries currently stored in the
3755  * skip list used to maintain a sorted list of
3756  * dirty entries in the cache.
3757  *
3758  * slist_ring_len: Array of integer of length H5C_RING_NTYPES used to
3759  * maintain a count of entries in the slist by ring. Note
3760  * that the sum of all the cells in this array must equal
3761  * the value stored in slist_len above.
3762  *
3763  * slist_ring_size: Array of size_t of length H5C_RING_NTYPES used to
3764  * maintain the sum of the sizes of all entries in the
3765  * slist by ring. Note that the sum of all cells in this
3766  * array must equal the value stored in slist_size above.
3767  *
3768  * slist_ptr: pointer to the instance of H5SL_t used maintain a sorted
3769  * list of dirty entries in the cache. This sorted list has
3770  * two uses:
3771  *
3772  * a) It allows us to flush dirty entries in increasing address
3773  * order, which results in significant savings.
3774  *
3775  * b) It facilitates checking for adjacent dirty entries when
3776  * attempting to evict entries from the cache. While we
3777  * don't use this at present, I hope that this will allow
3778  * some optimizations when I get to it.
3779  *
3780  * num_last_entries: The number of entries in the cache that can only be
3781  * flushed after all other entries in the cache have
3782  * been flushed. At this time, this will only ever be
3783  * one entry (the superblock), and the code has been
3784  * protected with HDasserts to enforce this. This restraint
3785  * can certainly be relaxed in the future if the need for
3786  * multiple entries being flushed last arises, though
3787  * explicit tests for that case should be added when said
3788  * HDasserts are removed.
3789  *
3790  * Update: There are now two possible last entries
3791  * (superblock and file driver info message). This
3792  * number will probably increase as we add superblock
3793  * messages. JRM -- 11/18/14
3794  *
3795  * With the addition of the fractal heap, the cache must now deal with
3796  * the case in which entries may be dirtied, moved, or have their sizes
3797  * changed during a flush. To allow sanity checks in this situation, the
3798  * following two fields have been added. They are only compiled in when
3799  * H5C_DO_SANITY_CHECKS is TRUE.
3800  *
3801  * slist_len_increase: Number of entries that have been added to the
3802  * slist since the last time this field was set to zero.
3803  * Note that this value can be negative.
3804  *
3805  * slist_size_increase: Total size of all entries that have been added
3806  * to the slist since the last time this field was set to
3807  * zero. Note that this value can be negative.
3808  *
3809  * Cache entries belonging to a particular object are "tagged" with that
3810  * object's base object header address.
3811  *
3812  * The following fields are maintained to facilitate this.
3813  *
3814  * tag_list: A skip list to track entries that belong to an object.
3815  * Each H5C_tag_info_t struct on the tag list corresponds to
3816  * a particular object in the file. Tagged entries can be
3817  * flushed or evicted as a group, or corked to prevent entries
3818  * from being evicted from the cache.
3819  *
3820  * "Global" entries, like the superblock and the file's
3821  * freelist, as well as shared entries like global
3822  * heaps and shared object header messages, are not tagged.
3823  *
3824  * ignore_tags: Boolean flag to disable tag validation during entry insertion.
3825  *
3826  * num_objs_corked: Unsigned integer field containing the number of objects
3827  * that are "corked". The "corked" status of an object is
3828  * found by searching the "tag_list". This field is added
3829  * for optimization so that the skip list search on "tag_list"
3830  * can be skipped if this field is zero, i.e. no "corked"
3831  * objects.
3832  *
3833  * When a cache entry is protected, it must be removed from the LRU
3834  * list(s) as it cannot be either flushed or evicted until it is unprotected.
3835  * The following fields are used to implement the protected list (pl).
3836  *
3837  * pl_len: Number of entries currently residing on the protected list.
3838  *
3839  * pl_size: Number of bytes of cache entries currently residing on the
3840  * protected list.
3841  *
3842  * pl_head_ptr: Pointer to the head of the doubly linked list of protected
3843  * entries. Note that cache entries on this list are linked
3844  * by their next and prev fields.
3845  *
3846  * This field is NULL if the list is empty.
3847  *
3848  * pl_tail_ptr: Pointer to the tail of the doubly linked list of protected
3849  * entries. Note that cache entries on this list are linked
3850  * by their next and prev fields.
3851  *
3852  * This field is NULL if the list is empty.
3853  *
3854  *
3855  * For very frequently used entries, the protect/unprotect overhead can
3856  * become burdensome. To avoid this overhead, I have modified the cache
3857  * to allow entries to be "pinned". A pinned entry is similar to a
3858  * protected entry, in the sense that it cannot be evicted, and that
3859  * the entry can be modified at any time.
3860  *
3861  * Pinning an entry has the following implications:
3862  *
3863  * 1) A pinned entry cannot be evicted. Thus unprotected
3864  * pinned entries reside in the pinned entry list, instead
3865  * of the LRU list(s) (or other lists maintained by the current
3866  * replacement policy code).
3867  *
3868  * 2) A pinned entry can be accessed or modified at any time.
3869  * This places an additional burden on the associated pre-serialize
 3870  * and serialize callbacks, which must ensure the entry is in
3871  * a consistent state before creating an image of it.
3872  *
3873  * 3) A pinned entry can be marked as dirty (and possibly
3874  * change size) while it is unprotected.
3875  *
3876  * 4) The flush-destroy code must allow pinned entries to
3877  * be unpinned (and possibly unprotected) during the
3878  * flush.
3879  *
3880  * Since pinned entries cannot be evicted, they must be kept on a pinned
3881  * entry list (pel), instead of being entrusted to the replacement policy
3882  * code.
3883  *
3884  * Maintaining the pinned entry list requires the following fields:
3885  *
3886  * pel_len: Number of entries currently residing on the pinned
3887  * entry list.
3888  *
3889  * pel_size: Number of bytes of cache entries currently residing on
3890  * the pinned entry list.
3891  *
3892  * pel_head_ptr: Pointer to the head of the doubly linked list of pinned
3893  * but not protected entries. Note that cache entries on
3894  * this list are linked by their next and prev fields.
3895  *
3896  * This field is NULL if the list is empty.
3897  *
3898  * pel_tail_ptr: Pointer to the tail of the doubly linked list of pinned
3899  * but not protected entries. Note that cache entries on
3900  * this list are linked by their next and prev fields.
3901  *
3902  * This field is NULL if the list is empty.
3903  *
3904  *
3905  * The cache must have a replacement policy, and the fields supporting this
3906  * policy must be accessible from this structure.
3907  *
3908  * While there has been interest in several replacement policies for
3909  * this cache, the initial development schedule is tight. Thus I have
3910  * elected to support only a modified LRU (least recently used) policy
3911  * for the first cut.
3912  *
3913  * To further simplify matters, I have simply included the fields needed
3914  * by the modified LRU in this structure. When and if we add support for
3915  * other policies, it will probably be easiest to just add the necessary
3916  * fields to this structure as well -- we only create one instance of this
3917  * structure per file, so the overhead is not excessive.
3918  *
3919  *
3920  * Fields supporting the modified LRU policy:
3921  *
3922  * See most any OS text for a discussion of the LRU replacement policy.
3923  *
3924  * When operating in parallel mode, we must ensure that a read does not
3925  * cause a write. If it does, the process will hang, as the write will
3926  * be collective and the other processes will not know to participate.
3927  *
3928  * To deal with this issue, I have modified the usual LRU policy by adding
3929  * clean and dirty LRU lists to the usual LRU list. In general, these
 3930  * lists only exist in parallel builds.
3931  *
3932  * The clean LRU list is simply the regular LRU list with all dirty cache
3933  * entries removed.
3934  *
3935  * Similarly, the dirty LRU list is the regular LRU list with all the clean
3936  * cache entries removed.
3937  *
3938  * When reading in parallel mode, we evict from the clean LRU list only.
3939  * This implies that we must try to ensure that the clean LRU list is
3940  * reasonably well stocked at all times.
3941  *
3942  * We attempt to do this by trying to flush enough entries on each write
3943  * to keep the cLRU_list_size >= min_clean_size.
3944  *
3945  * Even if we start with a completely clean cache, a sequence of protects
3946  * without unprotects can empty the clean LRU list. In this case, the
3947  * cache must grow temporarily. At the next sync point, we will attempt to
3948  * evict enough entries to reduce index_size to less than max_cache_size.
3949  * While this will usually be possible, all bets are off if enough entries
3950  * are protected.
3951  *
3952  * Discussions of the individual fields used by the modified LRU replacement
3953  * policy follow:
3954  *
3955  * LRU_list_len: Number of cache entries currently on the LRU list.
3956  *
3957  * Observe that LRU_list_len + pl_len + pel_len must always
3958  * equal index_len.
3959  *
3960  * LRU_list_size: Number of bytes of cache entries currently residing on the
3961  * LRU list.
3962  *
3963  * Observe that LRU_list_size + pl_size + pel_size must always
3964  * equal index_size.
3965  *
3966  * LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
3967  * entries on this list are linked by their next and prev fields.
3968  *
3969  * This field is NULL if the list is empty.
3970  *
3971  * LRU_tail_ptr: Pointer to the tail of the doubly linked LRU list. Cache
3972  * entries on this list are linked by their next and prev fields.
3973  *
3974  * This field is NULL if the list is empty.
3975  *
3976  * cLRU_list_len: Number of cache entries currently on the clean LRU list.
3977  *
3978  * Observe that cLRU_list_len + dLRU_list_len must always
3979  * equal LRU_list_len.
3980  *
3981  * cLRU_list_size: Number of bytes of cache entries currently residing on
3982  * the clean LRU list.
3983  *
3984  * Observe that cLRU_list_size + dLRU_list_size must always
3985  * equal LRU_list_size.
3986  *
3987  * cLRU_head_ptr: Pointer to the head of the doubly linked clean LRU list.
3988  * Cache entries on this list are linked by their aux_next and
3989  * aux_prev fields.
3990  *
3991  * This field is NULL if the list is empty.
3992  *
3993  * cLRU_tail_ptr: Pointer to the tail of the doubly linked clean LRU list.
3994  * Cache entries on this list are linked by their aux_next and
3995  * aux_prev fields.
3996  *
3997  * This field is NULL if the list is empty.
3998  *
3999  * dLRU_list_len: Number of cache entries currently on the dirty LRU list.
4000  *
4001  * Observe that cLRU_list_len + dLRU_list_len must always
4002  * equal LRU_list_len.
4003  *
 4004  * dLRU_list_size: Number of bytes of cache entries currently on the dirty LRU list.
 4005  *
 4006  * Observe that cLRU_list_size + dLRU_list_size must always
 4007  * equal LRU_list_size.
4008  *
4009  * dLRU_head_ptr: Pointer to the head of the doubly linked dirty LRU list.
4010  * Cache entries on this list are linked by their aux_next and
4011  * aux_prev fields.
4012  *
4013  * This field is NULL if the list is empty.
4014  *
4015  * dLRU_tail_ptr: Pointer to the tail of the doubly linked dirty LRU list.
4016  * Cache entries on this list are linked by their aux_next and
4017  * aux_prev fields.
4018  *
4019  * This field is NULL if the list is empty.
4020  *
4021  *
4022  * Automatic cache size adjustment:
4023  *
4024  * While the default cache size is adequate for most cases, we can run into
4025  * cases where the default is too small. Ideally, we will let the user
4026  * adjust the cache size as required. However, this is not possible in all
4027  * cases. Thus I have added automatic cache size adjustment code.
4028  *
4029  * The configuration for the automatic cache size adjustment is stored in
4030  * the structure described below:
4031  *
4032  * size_increase_possible: Depending on the configuration data given
4033  * in the resize_ctl field, it may or may not be possible
4034  * to increase the size of the cache. Rather than test for
4035  * all the ways this can happen, we simply set this flag when
4036  * we receive a new configuration.
4037  *
4038  * flash_size_increase_possible: Depending on the configuration data given
4039  * in the resize_ctl field, it may or may not be possible
4040  * for a flash size increase to occur. We set this flag
4041  * whenever we receive a new configuration so as to avoid
4042  * repeated calculations.
4043  *
4044  * flash_size_increase_threshold: If a flash cache size increase is possible,
4045  * this field is used to store the minimum size of a new entry
4046  * or size increase needed to trigger a flash cache size
4047  * increase. Note that this field must be updated whenever
4048  * the size of the cache is changed.
4049  *
4050  * size_decrease_possible: Depending on the configuration data given
4051  * in the resize_ctl field, it may or may not be possible
4052  * to decrease the size of the cache. Rather than test for
4053  * all the ways this can happen, we simply set this flag when
4054  * we receive a new configuration.
4055  *
4056  * resize_enabled: This is another convenience flag which is set whenever
4057  * a new set of values for resize_ctl are provided. Very
4058  * simply,
4059  *
4060  * resize_enabled = size_increase_possible ||
4061  * size_decrease_possible;
4062  *
4063  * cache_full: Boolean flag used to keep track of whether the cache is
4064  * full, so we can refrain from increasing the size of a
4065  * cache which hasn't used up the space allotted to it.
4066  *
4067  * The field is initialized to FALSE, and then set to TRUE
4068  * whenever we attempt to make space in the cache.
4069  *
4070  * size_decreased: Boolean flag set to TRUE whenever the maximum cache
4071  * size is decreased. The flag triggers a call to
4072  * H5C__make_space_in_cache() on the next call to H5C_protect().
4073  *
4074  * resize_in_progress: As the metadata cache has become re-entrant, it is
4075  * possible that a protect may trigger a call to
4076  * H5C__auto_adjust_cache_size(), which may trigger a flush,
4077  * which may trigger a protect, which will result in another
4078  * call to H5C__auto_adjust_cache_size().
4079  *
4080  * The resize_in_progress boolean flag is used to detect this,
4081  * and to prevent the infinite recursion that would otherwise
4082  * occur.
4083  *
4084  * Note that this issue is not hypothetical -- this field
4085  * was added 12/29/15 to fix a bug exposed in the testing
4086  * of changes to the file driver info superblock extension
4087  * management code needed to support rings.
4088  *
4089  * msic_in_progress: As the metadata cache has become re-entrant, and as
4090  * the free space manager code has become more tightly
4091  * integrated with the metadata cache, it is possible that
4092  * a call to H5C_insert_entry() may trigger a call to
4093  * H5C_make_space_in_cache(), which, via H5C__flush_single_entry()
4094  * and client callbacks, may trigger an infinite regression
4095  * of calls to H5C_make_space_in_cache().
4096  *
4097  * The msic_in_progress boolean flag is used to detect this,
4098  * and prevent the infinite regression that would otherwise
4099  * occur.
4100  *
4101  * Note that this issue is not hypothetical -- this field
4102  * was added 2/16/17 to address this issue when it was
4103  * exposed by modifications to test/fheap.c to cause it to
4104  * use paged allocation.
4105  *
4106  * resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
4107  * data for automatic cache resizing.
4108  *
4109  * epoch_markers_active: Integer field containing the number of epoch
4110  * markers currently in use in the LRU list. This value
4111  * must be in the range [0, H5C__MAX_EPOCH_MARKERS - 1].
4112  *
4113  * epoch_marker_active: Array of boolean of length H5C__MAX_EPOCH_MARKERS.
4114  * This array is used to track which epoch markers are currently
4115  * in use.
4116  *
4117  * epoch_marker_ringbuf: Array of int of length H5C__MAX_EPOCH_MARKERS + 1.
4118  *
4119  * To manage the epoch marker cache entries, it is necessary
4120  * to track their order in the LRU list. This is done with
4121  * epoch_marker_ringbuf. When markers are inserted at the
4122  * head of the LRU list, the index of the marker in the
4123  * epoch_markers array is inserted at the tail of the ring
4124  * buffer. When it becomes the epoch_marker_active'th marker
4125  * in the LRU list, it will have worked its way to the head
4126  * of the ring buffer as well. This allows us to remove it
4127  * without scanning the LRU list if such is required.
4128  *
4129  * epoch_marker_ringbuf_first: Integer field containing the index of the
4130  * first entry in the ring buffer.
4131  *
4132  * epoch_marker_ringbuf_last: Integer field containing the index of the
4133  * last entry in the ring buffer.
4134  *
4135  * epoch_marker_ringbuf_size: Integer field containing the number of entries
4136  * in the ring buffer.
4137  *
4138  * epoch_markers: Array of instances of H5C_cache_entry_t of length
4139  * H5C__MAX_EPOCH_MARKERS. The entries are used as markers
4140  * in the LRU list to identify cache entries that haven't
4141  * been accessed for some (small) specified number of
4142  * epochs. These entries (if any) can then be evicted and
4143  * the cache size reduced -- ideally without evicting any
4144  * of the current working set. Needless to say, the epoch
4145  * length and the number of epochs before an unused entry
4146  * must be chosen so that all, or almost all, the working
4147  * set will be accessed before the limit.
4148  *
4149  * Epoch markers only appear in the LRU list, never in
4150  * the index or slist. While they are of type
4151  * H5C__EPOCH_MARKER_TYPE, and have associated class
4152  * functions, these functions should never be called.
4153  *
4154  * The addr fields of these instances of H5C_cache_entry_t
4155  * are set to the index of the instance in the epoch_markers
4156  * array, the size is set to 0, and the type field points
4157  * to the constant structure epoch_marker_class defined
4158  * in H5C.c. The next and prev fields are used as usual
4159  * to link the entry into the LRU list.
4160  *
4161  * All other fields are unused.
4162  *
4163  *
4164  * Cache hit rate collection fields:
4165  *
4166  * We supply the current cache hit rate on request, so we must keep a
4167  * simple cache hit rate computation regardless of whether statistics
4168  * collection is enabled. The following fields support this capability.
4169  *
4170  * cache_hits: Number of cache hits since the last time the cache hit
4171  * rate statistics were reset. Note that when automatic cache
4172  * re-sizing is enabled, this field will be reset every automatic
4173  * resize epoch.
4174  *
4175  * cache_accesses: Number of times the cache has been accessed
4176  * since the last time the cache hit rate statistics
4177  * were reset. Note that when automatic cache re-sizing is enabled,
4178  * this field will be reset every automatic resize epoch.
4179  *
4180  *
4181  * Metadata cache image management related fields.
4182  *
4183  * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration
4184  * data for generation of a cache image on file close.
4185  *
4186  * serialization_in_progress: Boolean field that is set to TRUE iff
4187  * the cache is in the process of being serialized. This
4188  * field is needed to support the H5C_serialization_in_progress()
4189  * call, which is in turn required for sanity checks in some
4190  * cache clients.
4191  *
4192  * load_image: Boolean flag indicating that the metadata cache image
4193  * superblock extension message exists and should be
4194  * read, and the image block read and decoded on the next
4195  * call to H5C_protect().
4196  *
4197  * image_loaded: Boolean flag indicating that the metadata cache has
4198  * loaded the metadata cache image as directed by the
4199  * MDC cache image superblock extension message.
4200  *
4201  * delete_image: Boolean flag indicating whether the metadata cache image
4202  * superblock message should be deleted and the cache image
4203  * file space freed after they have been read and decoded.
4204  *
4205  * This flag should be set to TRUE iff the file is opened
4206  * R/W and there is a cache image to be read.
4207  *
4208  * image_addr: haddr_t containing the base address of the on disk
4209  * metadata cache image, or HADDR_UNDEF if that value is
4210  * undefined. Note that this field is used both in the
4211  * construction and write, and the read and decode of
4212  * metadata cache image blocks.
4213  *
4214  * image_len: hsize_t containing the size of the on disk metadata cache
4215  * image, or zero if that value is undefined. Note that this
4216  * field is used both in the construction and write, and the
4217  * read and decode of metadata cache image blocks.
4218  *
4219  * image_data_len: size_t containing the number of bytes of data in the
4220  * on disk metadata cache image, or zero if that value is
4221  * undefined.
4222  *
4223  * In most cases, this value is the same as the image_len
4224  * above. It exists to allow for metadata cache image blocks
4225  * that are larger than the actual image. Thus in all
4226  * cases image_data_len <= image_len.
4227  *
4228  * To create the metadata cache image, we must first serialize all the
4229  * entries in the metadata cache. This is done by a scan of the index.
4230  * As entries must be serialized in increasing flush dependency height
4231  * order, we scan the index repeatedly, once for each flush dependency
4232  * height in increasing order.
4233  *
4234  * This operation is complicated by the fact that entries other than the
4235  * target may be inserted, loaded, relocated, or removed from the cache
4236  * (either by eviction or the take ownership flag) as the result of a
4237  * pre_serialize or serialize callback. While entry removals are not
4238  * a problem for the scan of the index, insertions, loads, and relocations
4239  * are. Hence the entries loaded, inserted, and relocated counters
4240  * listed below have been implemented to allow these conditions to be
4241  * detected and dealt with by restarting the scan.
4242  *
4243  * The serialization operation is further complicated by the fact that
4244  * the flush dependency height of a given entry may increase (as the
4245  * result of an entry load or insert) or decrease (as the result of an
4246  * entry removal -- via either eviction or the take ownership flag). The
4247  * entry_fd_height_change_counter field is maintained to allow detection
4248  * of this condition, and a restart of the scan when it occurs.
4249  *
4250  * Note that all these new fields would work just as well as booleans.
4251  *
4252  * entries_loaded_counter: Number of entries loaded into the cache
4253  * since the last time this field was reset.
4254  *
4255  * entries_inserted_counter: Number of entries inserted into the cache
4256  * since the last time this field was reset.
4257  *
4258  * entries_relocated_counter: Number of entries whose base address has
4259  * been changed since the last time this field was reset.
4260  *
4261  * entry_fd_height_change_counter: Number of entries whose flush dependency
4262  * height has changed since the last time this field was reset.
4263  *
4264  * The following fields are used to assemble the cache image prior to
4265  * writing it to disk.
4266  *
4267  * num_entries_in_image: Unsigned integer field containing the number of entries
4268  * to be copied into the metadata cache image. Note that
4269  * this value will be less than the number of entries in
4270  * the cache, and the superblock and its related entries
4271  * are not written to the metadata cache image.
4272  *
4273  * image_entries: Pointer to a dynamically allocated array of instance of
4274  * H5C_image_entry_t of length num_entries_in_image, or NULL
4275  * if that array does not exist. This array is used to
4276  * assemble entry data to be included in the image, and to
4277  * sort them by flush dependency height and LRU rank.
4278  *
4279  * image_buffer: Pointer to the dynamically allocated buffer of length
4280  * image_len in which the metadata cache image is assembled,
4281  * or NULL if that buffer does not exist.
4282  *
4283  *
4284  * Free Space Manager Related fields:
4285  *
4286  * The free space managers must be informed when we are about to close
4287  * or flush the file so that they order themselves accordingly. This used
4288  * to be done much later in the close process, but with cache image and
4289  * page buffering, this is no longer viable, as we must finalize the on
4290  * disk image of all metadata much sooner.
4291  *
4292  * This is handled by the H5MF_settle_raw_data_fsm() and
4293  * H5MF_settle_meta_data_FSM() routines. As these calls are expensive,
4294  * the following fields are used to track whether the target free space
4295  * managers are clean.
4296  *
4297  * They are also used in sanity checking, as once a free space manager is
4298  * settled, it should not become unsettled (i.e. be asked to allocate or
4299  * free file space) either ever (in the case of a file close) or until the
4300  * flush is complete.
4301  *
4302  * rdfsm_settled: Boolean flag indicating whether the raw data free space
4303  * manager is settled -- i.e. whether the correct space has
4304  * been allocated for it in the file.
4305  *
4306  * Note that the name of this field is deceptive. In the
4307  * multi file case, the flag applies to all free space
4308  * managers that are not involved in allocating space for
4309  * free space manager metadata.
4310  *
4311  * mdfsm_settled: Boolean flag indicating whether the meta data free space
4312  * manager is settled -- i.e. whether the correct space has
4313  * been allocated for it in the file.
4314  *
4315  * Note that the name of this field is deceptive. In the
4316  * multi file case, the flag applies only to free space
4317  * managers that are involved in allocating space for free
4318  * space managers.
4319  *
4320  *
4321  * Statistics collection fields:
4322  *
4323  * When enabled, these fields are used to collect statistics as described
4324  * below. The first set are collected only when H5C_COLLECT_CACHE_STATS
4325  * is true.
4326  *
4327  * hits: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4328  * are used to record the number of times an entry with type id
4329  * equal to the array index has been in cache when requested in
4330  * the current epoch.
4331  *
4332  * misses: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4333  * are used to record the number of times an entry with type id
4334  * equal to the array index has not been in cache when
4335  * requested in the current epoch.
4336  *
4337  * write_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
4338  * cells are used to record the number of times an entry with
4339  * type id equal to the array index has been write protected
4340  * in the current epoch.
4341  *
4342  * Observe that (hits + misses) = (write_protects + read_protects).
4343  *
4344  * read_protects: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
4345  * cells are used to record the number of times an entry with
4346  * type id equal to the array index has been read protected in
4347  * the current epoch.
4348  *
4349  * Observe that (hits + misses) = (write_protects + read_protects).
4350  *
4351  * max_read_protects: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1.
4352  * The cells are used to record the maximum number of simultaneous read
4353  * protects on any entry with type id equal to the array index
4354  * in the current epoch.
4355  *
4356  * insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4357  * are used to record the number of times an entry with type
4358  * id equal to the array index has been inserted into the
4359  * cache in the current epoch.
4360  *
4361  * pinned_insertions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
4362  * The cells are used to record the number of times an entry
4363  * with type id equal to the array index has been inserted
4364  * pinned into the cache in the current epoch.
4365  *
4366  * clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4367  * are used to record the number of times a dirty entry with type
4368  * id equal to the array index has been cleared in the current
4369  * epoch.
4370  *
4371  * flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4372  * are used to record the number of times an entry with type id
4373  * equal to the array index has been written to disk in the
4374  * current epoch.
4375  *
4376  * evictions: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4377  * are used to record the number of times an entry with type id
4378  * equal to the array index has been evicted from the cache in
4379  * the current epoch.
4380  *
4381  * take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
4382  * cells are used to record the number of times an entry with
4383  * type id equal to the array index has been removed from the
4384  * cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch.
4385  *
4386  * moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4387  * are used to record the number of times an entry with type
4388  * id equal to the array index has been moved in the current
4389  * epoch.
4390  *
4391  * entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
4392  * The cells are used to record the number of times an entry
4393  * with type id equal to the array index has been moved
4394  * during its pre-serialize callback in the current epoch.
4395  *
4396  * cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
4397  * The cells are used to record the number of times an entry
4398  * with type id equal to the array index has been moved
4399  * during a cache flush in the current epoch.
4400  *
4401  * pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4402  * are used to record the number of times an entry with type
4403  * id equal to the array index has been pinned in the current
4404  * epoch.
4405  *
4406  * unpins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4407  * are used to record the number of times an entry with type
4408  * id equal to the array index has been unpinned in the current
4409  * epoch.
4410  *
4411  * dirty_pins: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4412  * are used to record the number of times an entry with type
4413  * id equal to the array index has been marked dirty while pinned
4414  * in the current epoch.
4415  *
4416  * pinned_flushes: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
4417  * cells are used to record the number of times an entry
4418  * with type id equal to the array index has been flushed while
4419  * pinned in the current epoch.
4420  *
4421  * pinned_clears: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
4422  * cells are used to record the number of times an entry
4423  * with type id equal to the array index has been cleared while
4424  * pinned in the current epoch.
4425  *
4426  * size_increases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
4427  * The cells are used to record the number of times an entry
4428  * with type id equal to the array index has increased in
4429  * size in the current epoch.
4430  *
4431  * size_decreases: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
4432  * The cells are used to record the number of times an entry
4433  * with type id equal to the array index has decreased in
4434  * size in the current epoch.
4435  *
4436  * entry_flush_size_changes: Array of int64 of length
4437  * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
4438  * the number of times an entry with type id equal to the
4439  * array index has changed size while in its pre-serialize
4440  * callback.
4441  *
4442  * cache_flush_size_changes: Array of int64 of length
4443  * H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
4444  * the number of times an entry with type id equal to the
4445  * array index has changed size during a cache flush
4446  *
4447  * total_ht_insertions: Number of times entries have been inserted into the
4448  * hash table in the current epoch.
4449  *
4450  * total_ht_deletions: Number of times entries have been deleted from the
4451  * hash table in the current epoch.
4452  *
4453  * successful_ht_searches: int64 containing the total number of successful
4454  * searches of the hash table in the current epoch.
4455  *
4456  * total_successful_ht_search_depth: int64 containing the total number of
4457  * entries other than the targets examined in successful
4458  * searches of the hash table in the current epoch.
4459  *
4460  * failed_ht_searches: int64 containing the total number of unsuccessful
4461  * searches of the hash table in the current epoch.
4462  *
4463  * total_failed_ht_search_depth: int64 containing the total number of
4464  * entries examined in unsuccessful searches of the hash
4465  * table in the current epoch.
4466  *
4467  * max_index_len: Largest value attained by the index_len field in the
4468  * current epoch.
4469  *
4470  * max_index_size: Largest value attained by the index_size field in the
4471  * current epoch.
4472  *
4473  * max_clean_index_size: Largest value attained by the clean_index_size field
4474  * in the current epoch.
4475  *
4476  * max_dirty_index_size: Largest value attained by the dirty_index_size field
4477  * in the current epoch.
4478  *
4479  * max_slist_len: Largest value attained by the slist_len field in the
4480  * current epoch.
4481  *
4482  * max_slist_size: Largest value attained by the slist_size field in the
4483  * current epoch.
4484  *
4485  * max_pl_len: Largest value attained by the pl_len field in the
4486  * current epoch.
4487  *
4488  * max_pl_size: Largest value attained by the pl_size field in the
4489  * current epoch.
4490  *
4491  * max_pel_len: Largest value attained by the pel_len field in the
4492  * current epoch.
4493  *
4494  * max_pel_size: Largest value attained by the pel_size field in the
4495  * current epoch.
4496  *
4497  * calls_to_msic: Total number of calls to H5C__make_space_in_cache
4498  *
4499  * total_entries_skipped_in_msic: Number of clean entries skipped while
4500  * enforcing the min_clean_fraction in H5C__make_space_in_cache().
4501  *
4502  * total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries
4503  * skipped in H5C__make_space_in_cache(). Note that this can
4504  * only occur when a file is opened R/O with a cache image
4505  * containing dirty entries.
4506  *
4507  * total_entries_scanned_in_msic: Number of clean entries skipped while
4508  * enforcing the min_clean_fraction in H5C__make_space_in_cache().
4509  *
4510  * max_entries_skipped_in_msic: Maximum number of clean entries skipped
4511  * in any one call to H5C__make_space_in_cache().
4512  *
4513  * max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched
4514  * entries skipped in any one call to H5C__make_space_in_cache().
4515  * Note that this can only occur when the file is opened
4516  * R/O with a cache image containing dirty entries.
4517  *
4518  * max_entries_scanned_in_msic: Maximum number of entries scanned over
4519  * in any one call to H5C__make_space_in_cache().
4520  *
4521  * entries_scanned_to_make_space: Number of entries scanned only when looking
4522  * for entries to evict in order to make space in cache.
4523  *
4524  *
4525  * The following fields track statistics on cache images.
4526  *
4527  * images_created: Integer field containing the number of cache images
4528  * created since the last time statistics were reset.
4529  *
4530  * At present, this field must always be either 0 or 1.
4531  * Further, since cache images are only created at file
4532  * close, this field should only be set at that time.
4533  *
4534  * images_read: Integer field containing the number of cache images
4535  * read from file. Note that reading an image is different
4536  * from loading it -- reading the image means just that,
4537  * while loading the image refers to decoding it and loading
4538  * it into the metadata cache.
4539  *
4540  * In the serial case, image_read should always equal
4541  * images_loaded. However, in the parallel case, the
4542  * image should only be read by process 0. All other
4543  * processes should receive the cache image via a broadcast
4544  * from process 0.
4545  *
4546  * images_loaded: Integer field containing the number of cache images
4547  * loaded since the last time statistics were reset.
4548  *
4549  * At present, this field must always be either 0 or 1.
4550  * Further, since cache images are only loaded at the
4551  * time of the first protect or on file close, this value
4552  * should only change on those events.
4553  *
4554  * last_image_size: Size of the most recently loaded metadata cache image
4555  * loaded into the cache, or zero if no image has been
4556  * loaded.
4557  *
4558  * At present, at most one cache image can be loaded into
4559  * the metadata cache for any given file, and this image
4560  * will be loaded either on the first protect, or on file
4561  * close if no entry is protected before then.
4562  *
4563  *
4564  * Fields for tracking prefetched entries. Note that flushes and evictions
4565  * of prefetched entries are tracked in the flushes and evictions arrays
4566  * discussed above.
4567  *
4568  * prefetches: Number of prefetched entries that are loaded to the
4569  * cache.
4570  *
4571  * dirty_prefetches: Number of dirty prefetched entries that are loaded
4572  * into the cache.
4573  *
4574  * prefetch_hits: Number of prefetched entries that are actually used.
4575  *
4576  *
4577  * As entries are now capable of moving, loading, dirtying, and deleting
4578  * other entries in their pre_serialize and serialize callbacks, it has
4579  * been necessary to insert code to restart scans of lists so as to avoid
4580  * improper behavior if the next entry in the list is the target of one of
4581  * these operations.
4582  *
4583  * The following fields are used to count such occurrences. They are used
4584  * both in tests (to verify that the scan has been restarted), and to
4585  * obtain estimates of how frequently these restarts occur.
4586  *
4587  * slist_scan_restarts: Number of times a scan of the slist (that contains
4588  * calls to H5C__flush_single_entry()) has been restarted to
4589  * avoid potential issues with change of status of the next
4590  * entry in the scan.
4591  *
4592  * LRU_scan_restarts: Number of times a scan of the LRU list (that contains
4593  * calls to H5C__flush_single_entry()) has been restarted to
4594  * avoid potential issues with change of status of the next
4595  * entry in the scan.
4596  *
4597  * index_scan_restarts: Number of times a scan of the index has been
4598  * restarted to avoid potential issues with load, insertion
4599  * or change in flush dependency height of an entry other
4600  * than the target entry as the result of call(s) to the
4601  * pre_serialize or serialize callbacks.
4602  *
4603  * Note that at present, this condition can only be triggered
4604  * by a call to H5C_serialize_single_entry().
4605  *
4606  * The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
4607  * and H5C_COLLECT_CACHE_ENTRY_STATS are true.
4608  *
4609  * max_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4610  * are used to record the maximum number of times any single
4611  * entry with type id equal to the array index has been
4612  * accessed in the current epoch.
4613  *
4614  * min_accesses: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4615  * are used to record the minimum number of times any single
4616  * entry with type id equal to the array index has been
4617  * accessed in the current epoch.
4618  *
4619  * max_clears: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4620  * are used to record the maximum number of times any single
4621  * entry with type id equal to the array index has been cleared
4622  * in the current epoch.
4623  *
4624  * max_flushes: Array of int32 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4625  * are used to record the maximum number of times any single
4626  * entry with type id equal to the array index has been
4627  * flushed in the current epoch.
4628  *
4629  * max_size: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4630  * are used to record the maximum size of any single entry
4631  * with type id equal to the array index that has resided in
4632  * the cache in the current epoch.
4633  *
4634  * max_pins: Array of size_t of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
4635  * are used to record the maximum number of times that any single
4636  * entry with type id equal to the array index that has been
4637  * marked as pinned in the cache in the current epoch.
4638  *
4639  *
4640  * Fields supporting testing:
4641  *
4642  * prefix Array of char used to prefix debugging output. The
4643  * field is intended to allow marking of output with
4644  * the processes mpi rank.
4645  *
4646  * get_entry_ptr_from_addr_counter: Counter used to track the number of
4647  * times the H5C_get_entry_ptr_from_addr() function has been
4648  * called successfully. This field is only defined when
4649  * NDEBUG is not #defined.
4650  *
4651  ****************************************************************************/
4652 struct H5C_t {
 /* NOTE(review): this is a generated (Doxygen) view of the struct -- many
  * members and their per-field comments are elided here (the embedded line
  * numbers jump). The authoritative field-by-field documentation is the
  * large comment block directly above this definition.
  */
4653  uint32_t magic; /* structure tag -- presumably H5C__H5C_T_MAGIC (defined above); confirm in H5C.c */
4656  void * aux_ptr; /* opaque client/auxiliary data pointer -- semantics not visible here */
4657  int32_t max_type_id; /* largest valid client type id -- TODO confirm against class_table_ptr length */
4658  const H5C_class_t * const *class_table_ptr; /* client class table, presumably indexed by type id */
4659  size_t max_cache_size; /* nominal upper bound on cache size in bytes (presumed) */
4660  size_t min_clean_size; /* minimum clean space to maintain, in bytes (presumed) */
4666 
4667  /* Fields for maintaining [hash table] index of entries */
4669  size_t index_size; /* total size in bytes of entries in the index (presumed) */
4672  size_t clean_index_size; /* bytes of clean entries in the index (presumed) */
4674  size_t dirty_index_size; /* bytes of dirty entries in the index (presumed) */
4677  uint32_t il_len; /* index list length (presumed) */
4678  size_t il_size; /* index list size in bytes (presumed) */
4681 
4682  /* Fields to detect entries removed during scans */
4686 
4687  /* Fields for maintaining list of in-order entries, for flushing */
4690  size_t slist_size; /* bytes of entries in the skip list (presumed) */
4693  H5SL_t * slist_ptr; /* skip list maintaining the in-order (flush) entry list */
4695 #if H5C_DO_SANITY_CHECKS
4696  int32_t slist_len_increase; /* sanity-check bookkeeping for slist length changes (presumed) */
4698 #endif /* H5C_DO_SANITY_CHECKS */
4699 
4700  /* Fields for maintaining list of tagged entries */
4701  H5SL_t * tag_list;
4704 
4705  /* Fields for tracking protected entries */
4706  uint32_t pl_len; /* protected list length */
4707  size_t pl_size; /* protected list size in bytes */
4710 
4711  /* Fields for tracking pinned entries */
4712  uint32_t pel_len; /* pinned entry list length */
4713  size_t pel_size; /* pinned entry list size in bytes */
4716 
4717  /* Fields for complete LRU list of entries */
4719  size_t LRU_list_size;
4722 
4723 #if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
4724  /* Fields for clean LRU list of entries */
4725  uint32_t cLRU_list_len;
4726  size_t cLRU_list_size;
4727  H5C_cache_entry_t * cLRU_head_ptr;
4728  H5C_cache_entry_t * cLRU_tail_ptr;
4729 
4730  /* Fields for dirty LRU list of entries */
4731  uint32_t dLRU_list_len;
4732  size_t dLRU_list_size;
4733  H5C_cache_entry_t * dLRU_head_ptr;
4734  H5C_cache_entry_t * dLRU_tail_ptr;
4735 #endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
4736 
4737 #ifdef H5_HAVE_PARALLEL
4738  /* Fields for collective metadata reads */
4739  uint32_t coll_list_len;
4740  size_t coll_list_size;
4741  H5C_cache_entry_t * coll_head_ptr;
4742  H5C_cache_entry_t * coll_tail_ptr;
4743 
4744  /* Fields for collective metadata writes */
4745  H5SL_t * coll_write_list;
4746 #endif /* H5_HAVE_PARALLEL */
4747 
4748  /* Fields for automatic cache size adjustment */
4759 
4760  /* Fields for epoch markers used in automatic cache size adjustment */
4761  int32_t epoch_markers_active; /* number of epoch markers currently in use in the LRU list */
4765  int32_t epoch_marker_ringbuf_last; /* index of the last entry in the ring buffer */
4766  int32_t epoch_marker_ringbuf_size; /* number of entries in the ring buffer */
4768 
4769  /* Fields for cache hit rate collection */
4772 
4773  /* fields supporting generation of a cache image on file close */
4788  void * image_buffer; /* image_len-byte buffer in which the cache image is assembled, or NULL */
4789 
4790  /* Free Space Manager Related fields */
4793 
4794 #if H5C_COLLECT_CACHE_STATS
4795  /* stats fields */
4819 
4820  /* Fields for hash table operations */
4828  size_t max_index_size; /* largest index_size attained this epoch */
4829  size_t max_clean_index_size; /* largest clean_index_size attained this epoch */
4830  size_t max_dirty_index_size; /* largest dirty_index_size attained this epoch */
4831 
4832  /* Fields for in-order skip list */
4834  size_t max_slist_size; /* largest slist_size attained this epoch */
4835 
4836  /* Fields for protected entry list */
4838  size_t max_pl_size; /* largest pl_size attained this epoch */
4839 
4840  /* Fields for pinned entry list */
4842  size_t max_pel_size; /* largest pel_size attained this epoch */
4843 
4844  /* Fields for tracking 'make space in cache' (msic) operations */
4853 
4854  /* Fields for tracking skip list scan restarts */
4858 
4859  /* Fields for tracking cache image operations */
4860  int32_t images_created; /* cache images created since stats reset (0 or 1 at present) */
4861  int32_t images_read; /* cache images read from file since stats reset */
4862  int32_t images_loaded; /* cache images decoded/loaded since stats reset (0 or 1 at present) */
4864 
4865  /* Fields for tracking prefetched entries */
4869 
4870 #if H5C_COLLECT_CACHE_ENTRY_STATS
 /* Per-type max/min counters, collected only when both stats macros are
  * enabled; see the header comment above for the meaning of each array.
  */
4871  int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
4872  int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
4873  int32_t max_clears[H5C__MAX_NUM_TYPE_IDS + 1];
4874  int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS + 1];
4875  size_t max_size[H5C__MAX_NUM_TYPE_IDS + 1];
4876  int32_t max_pins[H5C__MAX_NUM_TYPE_IDS + 1];
4877 #endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
4878 #endif /* H5C_COLLECT_CACHE_STATS */
4879 
4880  char prefix[H5C__PREFIX_LEN]; /* prefix for debugging output (e.g. to mark the process's MPI rank) */
4881 
4882 #ifndef NDEBUG
4884 #endif /* NDEBUG */
4885 };
4886 
4887 /* Define typedef for tagged cache entry iteration callbacks */
 /* Invoked once per matching entry by H5C__iter_tagged_entries(), with the
  * entry pointer and the caller-supplied context. The int return value
  * presumably controls iteration (continue/stop/error) -- confirm against
  * the callback handling in H5Ctag.c.
  */
4888 typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
4889 
4890 
4891 /*****************************/
4892 /* Package Private Variables */
4893 /*****************************/
4894 
4895 
4896 /******************************/
4897 /* Package Private Prototypes */
4898 /******************************/
 /* NOTE(review): several prototypes in this section are truncated in this
  * generated view (their leading lines were elided); the fragments are kept
  * as-is. The two lines below are the tail of the
  * H5C__deserialize_prefetched_entry() prototype (full signature visible in
  * the member index further down this page).
  */
4901  H5C_cache_entry_t** entry_ptr_ptr, const H5C_class_t * type, haddr_t addr,
4902  void * udata);
4903 
4904 /* General routines */
4906  unsigned flags);
 /* Make at least space_needed bytes available in the cache; write_permitted
  * presumably controls whether dirty entries may be flushed to do so --
  * confirm against the definition in H5C.c.
  */
4911 H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, size_t space_needed,
4912  hbool_t write_permitted);
4915  H5C_cache_entry_t *entry_ptr);
 /* Invoke cb(entry, cb_ctx) for each cache entry carrying the given tag;
  * match_global presumably widens matching to globally-tagged entries --
  * confirm in H5Ctag.c.
  */
4917 H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
4918  H5C_tag_iter_cb_t cb, void *cb_ctx);
4919 
4920 /* Routines for operating on entry tags */
 /* Add entry_ptr to the tag tracking structures of cache_ptr (see the
  * tag_list field and H5Ctag.c). */
4921 H5_DLL herr_t H5C__tag_entry(H5C_t * cache_ptr, H5C_cache_entry_t * entry_ptr);
4923 
4924 /* Testing functions */
4925 #ifdef H5C_TESTING
4927 #endif /* H5C_TESTING */
4928 
4929 #endif /* _H5Cpkg_H */
4930 
H5C_t::min_clean_size
size_t min_clean_size
Definition: H5Cpkg.h:5845
H5C_t::epoch_marker_ringbuf_size
int32_t epoch_marker_ringbuf_size
Definition: H5Cpkg.h:5951
H5C_t::image_entries
H5C_image_entry_t * image_entries
Definition: H5Cpkg.h:5972
H5C__deserialize_prefetched_entry
H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t **entry_ptr_ptr, const H5C_class_t *type, haddr_t addr, void *udata)
Definition: H5Cimage.c:467
H5C_t::il_head
H5C_cache_entry_t * il_head
Definition: H5Cpkg.h:5864
H5C_t::evictions
int64_t evictions[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5990
H5SL_t
Definition: H5SL.c:557
H5C_t::pel_tail_ptr
H5C_cache_entry_t * pel_tail_ptr
Definition: H5Cpkg.h:5900
H5C_t::slist_ring_size
size_t slist_ring_size[H5C_RING_NTYPES]
Definition: H5Cpkg.h:5877
H5C_t::flash_size_increase_possible
hbool_t flash_size_increase_possible
Definition: H5Cpkg.h:5935
f
hdr f
Definition: H5EA.c:755
H5C__flush_marked_entries
H5_DLL herr_t H5C__flush_marked_entries(H5F_t *f)
Definition: H5C.c:7619
H5C_t::index
H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN]
Definition: H5Cpkg.h:5861
H5C_t::max_read_protects
int32_t max_read_protects[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5985
H5C_t::failed_ht_searches
int64_t failed_ht_searches
Definition: H5Cpkg.h:6010
H5C_t::log_flush
H5C_log_flush_func_t log_flush
Definition: H5Cpkg.h:5848
H5C__verify_cork_tag_test
herr_t H5C__verify_cork_tag_test(hid_t fid, H5O_token_t tag_token, hbool_t status)
Definition: H5Ctest.c:136
H5C_t::resize_in_progress
hbool_t resize_in_progress
Definition: H5Cpkg.h:5941
H5C_t::clean_index_ring_size
size_t clean_index_ring_size[H5C_RING_NTYPES]
Definition: H5Cpkg.h:5858
H5C_t::entry_fd_height_change_counter
int64_t entry_fd_height_change_counter
Definition: H5Cpkg.h:5970
H5C_t::last_entry_removed_ptr
H5C_cache_entry_t * last_entry_removed_ptr
Definition: H5Cpkg.h:5869
H5C_t::max_slist_size
size_t max_slist_size
Definition: H5Cpkg.h:6019
H5C_t::cache_flush_moves
int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5994
H5C_t::serialization_in_progress
hbool_t serialization_in_progress
Definition: H5Cpkg.h:5960
H5C_t::max_flushes
int32_t max_flushes[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6059
H5C_t::resize_enabled
hbool_t resize_enabled
Definition: H5Cpkg.h:5938
H5C_tag_info_t::head
H5C_cache_entry_t * head
Definition: H5Cpkg.h:3465
H5C_t::max_clean_index_size
size_t max_clean_index_size
Definition: H5Cpkg.h:6014
H5C_t::flash_size_increase_threshold
size_t flash_size_increase_threshold
Definition: H5Cpkg.h:5936
H5C_RING_NTYPES
#define H5C_RING_NTYPES
Definition: H5Cprivate.h:962
H5C_t
Definition: H5Cpkg.h:4642
H5C_t::slist_ptr
H5SL_t * slist_ptr
Definition: H5Cpkg.h:5878
H5C_t::take_ownerships
int64_t take_ownerships[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5991
H5C__mark_flush_dep_unserialized
H5_DLL herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t *entry_ptr)
Definition: H5C.c:7904
H5C_t::epoch_marker_ringbuf
int32_t epoch_marker_ringbuf[H5C__MAX_EPOCH_MARKERS+1]
Definition: H5Cpkg.h:5948
H5C_t::log_info
H5C_log_info_t * log_info
Definition: H5Cpkg.h:5840
H5C_t::clears
int64_t clears[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5988
uint32_t
uint32_t
Definition: H5overflow.txt:38
H5C__generate_image
H5_DLL herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
Definition: H5C.c:8506
H5C_t::il_size
size_t il_size
Definition: H5Cpkg.h:5863
H5C_t::entry_flush_moves
int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5993
haddr_t
CATCH haddr_t
Definition: H5EAdblock.c:162
H5C_t::images_read
int32_t images_read
Definition: H5Cpkg.h:6046
H5C_t::entries_relocated_counter
int64_t entries_relocated_counter
Definition: H5Cpkg.h:5969
H5C_t::unpins
int64_t unpins[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5996
H5C_t::delete_image
hbool_t delete_image
Definition: H5Cpkg.h:5963
H5C__untag_entry
H5_DLL herr_t H5C__untag_entry(H5C_t *cache, H5C_cache_entry_t *entry)
Definition: H5Ctag.c:306
H5C_t::slist_changed
hbool_t slist_changed
Definition: H5Cpkg.h:5873
H5C_t::msic_in_progress
hbool_t msic_in_progress
Definition: H5Cpkg.h:5942
H5C_t::max_pins
int32_t max_pins[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6061
H5Clog.h
H5C_t::cache_hits
int64_t cache_hits
Definition: H5Cpkg.h:5955
H5C_t::slist_len_increase
int32_t slist_len_increase
Definition: H5Cpkg.h:5881
H5C_t::entries_removed_counter
int64_t entries_removed_counter
Definition: H5Cpkg.h:5868
H5C_t::index_size
size_t index_size
Definition: H5Cpkg.h:5854
H5C_t::prefetch_hits
int64_t prefetch_hits
Definition: H5Cpkg.h:6053
H5C_t::insertions
int64_t insertions[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5986
H5C_t::max_entries_skipped_in_msic
int32_t max_entries_skipped_in_msic
Definition: H5Cpkg.h:6034
H5C_t::pl_size
size_t pl_size
Definition: H5Cpkg.h:5892
H5C_t::slist_scan_restarts
int64_t slist_scan_restarts
Definition: H5Cpkg.h:6040
H5C_t::cache_accesses
int64_t cache_accesses
Definition: H5Cpkg.h:5956
H5C_t::last_image_size
hsize_t last_image_size
Definition: H5Cpkg.h:6048
H5C_t::total_entries_scanned_in_msic
int64_t total_entries_scanned_in_msic
Definition: H5Cpkg.h:6033
H5C_t::flushes
int64_t flushes[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5989
H5C_t::misses
int64_t misses[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5982
H5C_t::max_index_len
uint32_t max_index_len
Definition: H5Cpkg.h:6012
H5C__tag_entry
H5_DLL herr_t H5C__tag_entry(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
Definition: H5Ctag.c:219
int64_t
int64_t
Definition: H5overflow.txt:35
H5C_t::aux_ptr
void * aux_ptr
Definition: H5Cpkg.h:5841
H5C_t::size_decreased
hbool_t size_decreased
Definition: H5Cpkg.h:5940
H5C__load_cache_image
H5_DLL herr_t H5C__load_cache_image(H5F_t *f)
Definition: H5Cimage.c:1117
H5C_t::max_type_id
int32_t max_type_id
Definition: H5Cpkg.h:5842
H5C_t::size_increases
int64_t size_increases[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6000
H5C_t::total_successful_ht_search_depth
int64_t total_successful_ht_search_depth
Definition: H5Cpkg.h:6009
H5C_t::image_data_len
hsize_t image_data_len
Definition: H5Cpkg.h:5966
H5C_t::image_addr
haddr_t image_addr
Definition: H5Cpkg.h:5964
H5C_t::total_failed_ht_search_depth
int64_t total_failed_ht_search_depth
Definition: H5Cpkg.h:6011
H5C_cache_image_ctl_t
Definition: H5Cprivate.h:2214
H5C_t::index_len
uint32_t index_len
Definition: H5Cpkg.h:5853
H5C_t::entry_flush_size_changes
int64_t entry_flush_size_changes[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6002
H5C_t::num_objs_corked
uint32_t num_objs_corked
Definition: H5Cpkg.h:5888
H5C_t::write_protects
int64_t write_protects[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5983
H5C_t::images_loaded
int32_t images_loaded
Definition: H5Cpkg.h:6047
H5C_t::moves
int64_t moves[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5992
H5C_t::mdfsm_settled
hbool_t mdfsm_settled
Definition: H5Cpkg.h:5977
H5C__serialize_cache
H5_DLL herr_t H5C__serialize_cache(H5F_t *f)
Definition: H5C.c:8013
H5C_t::hits
int64_t hits[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5981
H5C_t::epoch_markers
H5C_cache_entry_t epoch_markers[H5C__MAX_EPOCH_MARKERS]
Definition: H5Cpkg.h:5952
H5C_t::total_entries_skipped_in_msic
int64_t total_entries_skipped_in_msic
Definition: H5Cpkg.h:6031
H5C_t::close_warning_received
hbool_t close_warning_received
Definition: H5Cpkg.h:5850
H5C_t::dirty_index_ring_size
size_t dirty_index_ring_size[H5C_RING_NTYPES]
Definition: H5Cpkg.h:5860
H5C_log_flush_func_t
herr_t(* H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr, hbool_t was_dirty, unsigned flags)
Definition: H5Cprivate.h:907
H5C_t::pl_head_ptr
H5C_cache_entry_t * pl_head_ptr
Definition: H5Cpkg.h:5893
H5SLprivate.h
H5C__flush_single_entry
H5_DLL herr_t H5C__flush_single_entry(H5F_t *f, H5C_cache_entry_t *entry_ptr, unsigned flags)
Definition: H5C.c:5927
hid_t
int64_t hid_t
Definition: H5Ipublic.h:55
H5C_t::max_clears
int32_t max_clears[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6058
H5C_t::flush_in_progress
hbool_t flush_in_progress
Definition: H5Cpkg.h:5839
H5C_t::entry_watched_for_removal
H5C_cache_entry_t * entry_watched_for_removal
Definition: H5Cpkg.h:5870
H5C_t::num_last_entries
uint32_t num_last_entries
Definition: H5Cpkg.h:5879
H5C_t::successful_ht_searches
int64_t successful_ht_searches
Definition: H5Cpkg.h:6008
H5C_t::ignore_tags
hbool_t ignore_tags
Definition: H5Cpkg.h:5887
H5C_t::total_ht_deletions
int64_t total_ht_deletions
Definition: H5Cpkg.h:6007
H5C_t::total_ht_insertions
int64_t total_ht_insertions
Definition: H5Cpkg.h:6006
H5C__generate_cache_image
H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, H5C_t *cache_ptr)
Definition: H5Cimage.c:392
H5Cprivate.h
H5C_t::size_decreases
int64_t size_decreases[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6001
H5C_t::il_len
uint32_t il_len
Definition: H5Cpkg.h:5862
H5C_t::pins
int64_t pins[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5995
H5C__MAX_EPOCH_MARKERS
#define H5C__MAX_EPOCH_MARKERS
Definition: H5Cpkg.h:48
H5C_tag_iter_cb_t
int(* H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx)
Definition: H5Cpkg.h:4878
H5C__HASH_TABLE_LEN
#define H5C__HASH_TABLE_LEN
Definition: H5Cpkg.h:51
H5C_t::LRU_list_size
size_t LRU_list_size
Definition: H5Cpkg.h:5904
int
CATCH int
Definition: H5EA.c:1002
H5C_t::cache_full
hbool_t cache_full
Definition: H5Cpkg.h:5939
H5C_t::tag_list
H5SL_t * tag_list
Definition: H5Cpkg.h:5886
H5C_t::image_buffer
void * image_buffer
Definition: H5Cpkg.h:5973
H5C_t::index_scan_restarts
int64_t index_scan_restarts
Definition: H5Cpkg.h:6042
cb_ctx
hdr cb_ctx
Definition: H5EAhdr.c:786
H5C_t::max_cache_size
size_t max_cache_size
Definition: H5Cpkg.h:5844
H5C_tag_info_t
struct H5C_tag_info_t H5C_tag_info_t
H5C_t::max_accesses
int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6056
H5C_t::entries_loaded_counter
int64_t entries_loaded_counter
Definition: H5Cpkg.h:5967
H5C_t::slist_len
uint32_t slist_len
Definition: H5Cpkg.h:5874
H5C_tag_info_t::entry_cnt
size_t entry_cnt
Definition: H5Cpkg.h:3466
H5C_t::LRU_tail_ptr
H5C_cache_entry_t * LRU_tail_ptr
Definition: H5Cpkg.h:5906
H5C__MAX_NUM_TYPE_IDS
#define H5C__MAX_NUM_TYPE_IDS
Definition: H5Cprivate.h:42
H5C_t::pinned_insertions
int64_t pinned_insertions[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5987
H5C_tag_info_t::tag
haddr_t tag
Definition: H5Cpkg.h:3464
H5C_t::min_accesses
int32_t min_accesses[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6057
H5C_t::image_loaded
hbool_t image_loaded
Definition: H5Cpkg.h:5962
H5C__iter_tagged_entries
H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global, H5C_tag_iter_cb_t cb, void *cb_ctx)
Definition: H5Ctag.c:427
H5C_t::max_entries_scanned_in_msic
int32_t max_entries_scanned_in_msic
Definition: H5Cpkg.h:6036
H5C_t::evictions_enabled
hbool_t evictions_enabled
Definition: H5Cpkg.h:5849
H5C_t::clean_index_size
size_t clean_index_size
Definition: H5Cpkg.h:5857
H5C_t::dirty_index_size
size_t dirty_index_size
Definition: H5Cpkg.h:5859
H5C_t::read_protects
int64_t read_protects[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5984
H5C_t::index_ring_len
uint32_t index_ring_len[H5C_RING_NTYPES]
Definition: H5Cpkg.h:5855
H5C_class_t
Definition: H5Cprivate.h:887
H5C_t::pinned_clears
int64_t pinned_clears[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5999
H5C_tag_info_t
Definition: H5Cpkg.h:3438
H5C_t::max_pl_len
uint32_t max_pl_len
Definition: H5Cpkg.h:6022
H5C_t::max_pel_len
uint32_t max_pel_len
Definition: H5Cpkg.h:6026
H5C_t::resize_ctl
H5C_auto_size_ctl_t resize_ctl
Definition: H5Cpkg.h:5943
H5C_t::calls_to_msic
int64_t calls_to_msic
Definition: H5Cpkg.h:6030
H5C_auto_size_ctl_t
Definition: H5Cprivate.h:2104
H5C_t::num_entries_in_image
uint32_t num_entries_in_image
Definition: H5Cpkg.h:5971
H5C_t::image_ctl
H5C_cache_image_ctl_t image_ctl
Definition: H5Cpkg.h:5959
H5C_t::image_len
hsize_t image_len
Definition: H5Cpkg.h:5965
H5C_t::slist_size_increase
int64_t slist_size_increase
Definition: H5Cpkg.h:5882
H5C_t::max_dirty_index_size
size_t max_dirty_index_size
Definition: H5Cpkg.h:6015
H5C_t::get_entry_ptr_from_addr_counter
int64_t get_entry_ptr_from_addr_counter
Definition: H5Cpkg.h:6068
H5C_t::max_slist_len
uint32_t max_slist_len
Definition: H5Cpkg.h:6018
H5C__make_space_in_cache
H5_DLL herr_t H5C__make_space_in_cache(H5F_t *f, size_t space_needed, hbool_t write_permitted)
Definition: H5C.c:6847
H5_DLL
#define H5_DLL
Definition: H5api_adpt.h:234
H5C_t::LRU_list_len
uint32_t LRU_list_len
Definition: H5Cpkg.h:5903
H5C_t::entries_scanned_to_make_space
int64_t entries_scanned_to_make_space
Definition: H5Cpkg.h:6037
H5C_t::LRU_head_ptr
H5C_cache_entry_t * LRU_head_ptr
Definition: H5Cpkg.h:5905
H5C_tag_info_t::corked
hbool_t corked
Definition: H5Cpkg.h:3467
H5C_write_permitted_func_t
herr_t(* H5C_write_permitted_func_t)(const H5F_t *f, hbool_t *write_permitted_ptr)
Definition: H5Cprivate.h:905
H5C_t::load_image
hbool_t load_image
Definition: H5Cpkg.h:5961
H5C_t::dirty_pins
int64_t dirty_pins[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5997
H5C_t::size_decrease_possible
hbool_t size_decrease_possible
Definition: H5Cpkg.h:5937
H5C_t::epoch_marker_active
hbool_t epoch_marker_active[H5C__MAX_EPOCH_MARKERS]
Definition: H5Cpkg.h:5947
H5C_t::total_dirty_pf_entries_skipped_in_msic
int64_t total_dirty_pf_entries_skipped_in_msic
Definition: H5Cpkg.h:6032
H5C_t::prefetches
int64_t prefetches
Definition: H5Cpkg.h:6051
H5C_t::slist_ring_len
uint32_t slist_ring_len[H5C_RING_NTYPES]
Definition: H5Cpkg.h:5876
H5C__PREFIX_LEN
#define H5C__PREFIX_LEN
Definition: H5Cprivate.h:43
H5C_t::max_index_size
size_t max_index_size
Definition: H5Cpkg.h:6013
H5F_t
Definition: H5Fpkg.h:374
H5C_t::pel_size
size_t pel_size
Definition: H5Cpkg.h:5898
H5C_t::slist_size
size_t slist_size
Definition: H5Cpkg.h:5875
H5C_t::max_pel_size
size_t max_pel_size
Definition: H5Cpkg.h:6027
H5C_t::dirty_prefetches
int64_t dirty_prefetches
Definition: H5Cpkg.h:6052
H5C_t::max_size
size_t max_size[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6060
H5C_t::il_tail
H5C_cache_entry_t * il_tail
Definition: H5Cpkg.h:5865
H5C_t::epoch_marker_ringbuf_first
int32_t epoch_marker_ringbuf_first
Definition: H5Cpkg.h:5949
H5C_t::epoch_markers_active
int32_t epoch_markers_active
Definition: H5Cpkg.h:5946
H5C_t::entries_inserted_counter
int64_t entries_inserted_counter
Definition: H5Cpkg.h:5968
H5C_t::epoch_marker_ringbuf_last
int32_t epoch_marker_ringbuf_last
Definition: H5Cpkg.h:5950
H5C_t::prefix
char prefix[H5C__PREFIX_LEN]
Definition: H5Cpkg.h:6065
H5C_t::pel_head_ptr
H5C_cache_entry_t * pel_head_ptr
Definition: H5Cpkg.h:5899
H5C_t::max_dirty_pf_entries_skipped_in_msic
int32_t max_dirty_pf_entries_skipped_in_msic
Definition: H5Cpkg.h:6035
herr_t
int herr_t
Definition: H5public.h:128
H5C_image_entry_t
Definition: H5Cprivate.h:1830
H5C_t::index_ring_size
size_t index_ring_size[H5C_RING_NTYPES]
Definition: H5Cpkg.h:5856
hbool_t
bool hbool_t
Definition: H5public.h:159
H5C_t::magic
uint32_t magic
Definition: H5Cpkg.h:5838
H5C_t::rdfsm_settled
hbool_t rdfsm_settled
Definition: H5Cpkg.h:5976
H5C__prep_image_for_file_close
H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hbool_t *image_generated)
Definition: H5Cimage.c:1339
H5C_t::size_increase_possible
hbool_t size_increase_possible
Definition: H5Cpkg.h:5934
H5C_cache_entry_t
Definition: H5Cprivate.h:1597
H5C__mark_flush_dep_serialized
H5_DLL herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t *entry_ptr)
Definition: H5C.c:7855
H5C_t::LRU_scan_restarts
int64_t LRU_scan_restarts
Definition: H5Cpkg.h:6041
H5C_t::max_pl_size
size_t max_pl_size
Definition: H5Cpkg.h:6023
hsize_t
hsize_t
Definition: H5overflow.txt:44
H5C_t::pl_len
uint32_t pl_len
Definition: H5Cpkg.h:5891
H5O_token_t
Definition: H5public.h:339
H5C_t::class_table_ptr
const H5C_class_t *const * class_table_ptr
Definition: H5Cpkg.h:5843
H5C_t::pel_len
uint32_t pel_len
Definition: H5Cpkg.h:5897
H5C_t::images_created
int32_t images_created
Definition: H5Cpkg.h:6045
H5C_t::check_write_permitted
H5C_write_permitted_func_t check_write_permitted
Definition: H5Cpkg.h:5846
H5C_t::cache_flush_size_changes
int64_t cache_flush_size_changes[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:6003
H5C_t::pl_tail_ptr
H5C_cache_entry_t * pl_tail_ptr
Definition: H5Cpkg.h:5894
H5C_t::write_permitted
hbool_t write_permitted
Definition: H5Cpkg.h:5847
H5C_log_info_t
Definition: H5Clog.h:69
H5C_t::pinned_flushes
int64_t pinned_flushes[H5C__MAX_NUM_TYPE_IDS+1]
Definition: H5Cpkg.h:5998