ref: d3b8847a0e06b130bade7e0365100fdf52efee33
parent: f574f150079d6c502ea17df323fe4a13b908f363
author: ngkaho1234 <[email protected]>
date: Mon Nov 23 08:26:56 EST 2015
ext4_bcache & ext4_blockdev: Buffer cache rework.
--- a/fs_test/common/test_lwext4.c
+++ b/fs_test/common/test_lwext4.c
@@ -136,30 +136,6 @@
printf("\n");
- uint32_t i;
- for (i = 0; i < bc->cnt; ++i) {
- printf("bcache->refctr[%" PRIu32 "]= %" PRIu32 "\n", i,
- bc->refctr[i]);
- }
-
- printf("\n");
- for (i = 0; i < bc->cnt; ++i) {
- printf("bcache->lru_id[%" PRIu32 "] = %" PRIu32 "\n", i,
- bc->lru_id[i]);
- }
-
- printf("\n");
- for (i = 0; i < bc->cnt; ++i) {
- printf("bcache->free_delay[%" PRIu32 "] = %d\n", i,
- bc->free_delay[i]);
- }
-
- printf("\n");
- for (i = 0; i < bc->cnt; ++i) {
- printf("bcache->lba[%" PRIu32 "] = %" PRIu32 "\n", i,
- (uint32_t)bc->lba[i]);
- }
-
printf("********************\n");
}
--- a/lwext4/ext4_bcache.c
+++ b/lwext4/ext4_bcache.c
@@ -42,6 +42,25 @@
#include <string.h>
#include <stdlib.h>
+static int
+ext4_bcache_lba_compare(struct ext4_buf *a,
+ struct ext4_buf *b)
+{
+ /*Compare explicitly: returning a->lba - b->lba would
+ * truncate the 64-bit difference to int.*/
+ if (a->lba > b->lba)
+ return 1;
+ else if (a->lba < b->lba)
+ return -1;
+ return 0;
+}
+
+static int
+ext4_bcache_lru_compare(struct ext4_buf *a,
+ struct ext4_buf *b)
+{
+ if (a->lru_id > b->lru_id)
+ return 1;
+ else if (a->lru_id < b->lru_id)
+ return -1;
+ return 0;
+}
+
+RB_GENERATE_INTERNAL(ext4_buf_lba, ext4_buf, lba_node,
+ ext4_bcache_lba_compare, static inline)
+RB_GENERATE_INTERNAL(ext4_buf_lru, ext4_buf, lru_node,
+ ext4_bcache_lru_compare, static inline)
+
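The two comparators above give the LBA and LRU trees their ordering, and RB_GENERATE_INTERNAL expands into the actual red-black tree operations with static inline linkage. As an aside, here is a minimal, self-contained sketch of the same tree.h pattern, assuming the BSD tree.h used by this patch is on the include path; the demo_* names are illustrative and not part of lwext4:

#include <stdio.h>
#include <stdint.h>
#include "tree.h"

struct demo_buf {
	uint64_t lba;
	RB_ENTRY(demo_buf) node;
};

static int demo_cmp(struct demo_buf *a, struct demo_buf *b)
{
	/* Same contract as ext4_bcache_lba_compare: negative/zero/positive. */
	if (a->lba > b->lba)
		return 1;
	else if (a->lba < b->lba)
		return -1;
	return 0;
}

RB_HEAD(demo_tree, demo_buf);
RB_GENERATE_INTERNAL(demo_tree, demo_buf, node, demo_cmp, static inline)

int main(void)
{
	struct demo_tree root = RB_INITIALIZER(&root);
	struct demo_buf a = { .lba = 42 }, b = { .lba = 7 };

	RB_INSERT(demo_tree, &root, &a);
	RB_INSERT(demo_tree, &root, &b);

	/* Lookup with a throwaway stack key, the same trick
	 * ext4_buf_lookup() below uses: only the fields the
	 * comparator reads need to be filled in. */
	struct demo_buf key = { .lba = 42 };
	struct demo_buf *hit = RB_FIND(demo_tree, &root, &key);
	printf("found: %d, min lba: %u\n", hit != NULL,
	       (unsigned)RB_MIN(demo_tree, &root)->lba);
	return 0;
}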
int ext4_bcache_init_dynamic(struct ext4_bcache *bc, uint32_t cnt,
uint32_t itemsize)
{
@@ -49,10 +68,6 @@
memset(bc, 0, sizeof(struct ext4_bcache));
- bc->data = malloc(cnt * itemsize);
- if (!bc->data)
- goto error;
-
bc->cnt = cnt;
bc->itemsize = itemsize;
bc->ref_blocks = 0;
@@ -59,154 +74,165 @@
bc->max_ref_blocks = 0;
return EOK;
-
-error:
-
- if (bc->data)
- free(bc->data);
-
- memset(bc, 0, sizeof(struct ext4_bcache));
-
- return ENOMEM;
}
int ext4_bcache_fini_dynamic(struct ext4_bcache *bc)
{
- if (bc->data)
- free(bc->data);
-
memset(bc, 0, sizeof(struct ext4_bcache));
-
return EOK;
}
-int ext4_bcache_alloc(struct ext4_bcache *bc, struct ext4_block *b,
- bool *is_new)
+static struct ext4_buf *
+ext4_buf_alloc(struct ext4_bcache *bc, uint64_t lba)
{
- uint32_t i;
- ext4_assert(bc && b && is_new);
+ void *data;
+ struct ext4_buf *buf;
+ data = malloc(bc->itemsize);
+ if (!data)
+ return NULL;
- /*Check if valid.*/
- ext4_assert(b->lb_id);
- if (!b->lb_id) {
- ext4_assert(b->lb_id);
+ buf = malloc(sizeof(struct ext4_buf));
+ if (!buf) {
+ free(data);
+ return NULL;
}
- uint32_t cache_id = bc->cnt;
- uint32_t alloc_id = 0;
+ buf->flags = 0;
+ buf->lba = lba;
+ buf->data = data;
+ buf->lru_id = 0;
+ buf->refctr = 0;
+ memset(&buf->lba_node, 0, sizeof(buf->lba_node));
+ memset(&buf->lru_node, 0, sizeof(buf->lru_node));
+ memset(&buf->dirty_node, 0, sizeof(buf->dirty_node));
+ return buf;
+}
- *is_new = false;
+static void ext4_buf_free(struct ext4_buf *buf)
+{
+ free(buf->data);
+ free(buf);
+}
- /*Find in free blocks (Last Recently Used).*/
- for (i = 0; i < bc->cnt; ++i) {
+static struct ext4_buf *
+ext4_buf_lookup(struct ext4_bcache *bc, uint64_t lba)
+{
+ struct ext4_buf tmp = {
+ .lba = lba
+ };
- /*Check if block is already in cache*/
- if (b->lb_id == bc->lba[i]) {
+ return RB_FIND(ext4_buf_lba, &bc->lba_root, &tmp);
+}
- if (!bc->refctr[i] && !bc->free_delay[i])
- bc->ref_blocks++;
+struct ext4_buf *ext4_buf_lowest_lru(struct ext4_bcache *bc)
+{
+ return RB_MIN(ext4_buf_lru, &bc->lru_root);
+}
- /*Update reference counter*/
- bc->refctr[i]++;
+void ext4_bcache_drop_buf(struct ext4_bcache *bc, struct ext4_buf *buf)
+{
+ /*Cannot drop any referenced buffers.*/
+ ext4_assert(!buf->refctr);
- /*Update usage marker*/
- bc->lru_id[i] = ++bc->lru_ctr;
+ RB_REMOVE(ext4_buf_lba, &bc->lba_root, buf);
+ RB_REMOVE(ext4_buf_lru, &bc->lru_root, buf);
- /*Set valid cache data and id*/
- b->data = bc->data + i * bc->itemsize;
- b->cache_id = i;
+ /*Forcibly drop dirty buffer.*/
+ if (ext4_bcache_test_flag(buf, BC_DIRTY))
+ SLIST_REMOVE(&bc->dirty_list,
+ buf,
+ ext4_buf,
+ dirty_node);
- /* If data in the caxhe is up-to-date */
- b->uptodate = ext4_bcache_test_flag(bc, i, BC_UPTODATE);
+ ext4_buf_free(buf);
+ bc->ref_blocks--;
+}
- return EOK;
- }
+int ext4_bcache_alloc(struct ext4_bcache *bc, struct ext4_block *b,
+ bool *is_new)
+{
+ struct ext4_buf *buf = ext4_buf_lookup(bc, b->lb_id);
+ if (buf) {
+ if (!buf->refctr) {
+ buf->lru_id = ++bc->lru_ctr;
+ RB_REMOVE(ext4_buf_lru, &bc->lru_root, buf);
+ if (ext4_bcache_test_flag(buf, BC_DIRTY))
+ SLIST_REMOVE(&bc->dirty_list,
+ buf,
+ ext4_buf,
+ dirty_node);
- /*Best fit calculations.*/
- if (bc->refctr[i])
- continue;
-
- if (bc->free_delay[i])
- continue;
-
- /*Block is unreferenced, but it may exist block with
- * lower usage marker*/
-
- /*First find.*/
- if (cache_id == bc->cnt) {
- cache_id = i;
- alloc_id = bc->lru_id[i];
- continue;
}
- /*Next find*/
- if (alloc_id <= bc->lru_id[i])
- continue;
-
- /*This block has lower alloc id marker*/
- cache_id = i;
- alloc_id = bc->lru_id[i];
- }
-
- if (cache_id != bc->cnt) {
- /*There was unreferenced block*/
- bc->lba[cache_id] = b->lb_id;
- bc->refctr[cache_id] = 1;
- bc->lru_id[cache_id] = ++bc->lru_ctr;
-
- /*Set valid cache data and id*/
- b->data = bc->data + cache_id * bc->itemsize;
- b->cache_id = cache_id;
-
- /* Data in the cache is not up-to-date anymore. */
- ext4_bcache_clear_flag(bc, cache_id, BC_UPTODATE);
- b->uptodate = false;
-
- /*Statistics*/
- bc->ref_blocks++;
- if (bc->ref_blocks > bc->max_ref_blocks)
- bc->max_ref_blocks = bc->ref_blocks;
-
- /*Block needs to be read.*/
- *is_new = true;
-
+ buf->refctr++;
+ b->uptodate = ext4_bcache_test_flag(buf, BC_UPTODATE);
+ b->dirty = false;
+ b->buf = buf;
+ b->data = buf->data;
+ *is_new = false;
return EOK;
}
+ buf = ext4_buf_alloc(bc, b->lb_id);
+ if (!buf)
+ return ENOMEM;
- ext4_dbg(DEBUG_BCACHE, DBG_ERROR
- "unable to alloc block cache!\n");
- return ENOMEM;
+ RB_INSERT(ext4_buf_lba, &bc->lba_root, buf);
+ bc->ref_blocks++;
+
+ buf->refctr = 1;
+ buf->lru_id = ++bc->lru_ctr;
+ b->uptodate = false;
+ b->dirty = false;
+ b->buf = buf;
+ b->data = buf->data;
+ *is_new = true;
+ return EOK;
}
int ext4_bcache_free(struct ext4_bcache *bc, struct ext4_block *b,
uint8_t free_delay)
{
+ struct ext4_buf *buf = b->buf;
+
ext4_assert(bc && b);
/*Check if valid.*/
ext4_assert(b->lb_id);
- /*Block should be in cache.*/
- ext4_assert(b->cache_id < bc->cnt);
+ /*Block should have a valid pointer to ext4_buf.*/
+ ext4_assert(buf);
/*Make sure nobody tries to free an unreferenced block.*/
- ext4_assert(bc->refctr[b->cache_id]);
+ ext4_assert(buf->refctr);
/*Just decrease reference counter*/
- if (bc->refctr[b->cache_id])
- bc->refctr[b->cache_id]--;
+ buf->refctr--;
if (free_delay)
- bc->free_delay[b->cache_id] = free_delay;
+ bc->free_delay = free_delay;
- /*Update statistics*/
- if (!bc->refctr[b->cache_id] && !bc->free_delay[b->cache_id])
- bc->ref_blocks--;
+ if (b->dirty) {
+ ext4_bcache_set_flag(buf, BC_DIRTY);
+ ext4_bcache_set_flag(buf, BC_UPTODATE);
+ b->uptodate = true;
+ }
+ if (!b->uptodate)
+ ext4_bcache_clear_flag(buf, BC_UPTODATE);
+ if (!buf->refctr) {
+ RB_INSERT(ext4_buf_lru, &bc->lru_root, buf);
+ if (ext4_bcache_test_flag(buf, BC_DIRTY))
+ SLIST_INSERT_HEAD(&bc->dirty_list, buf, dirty_node);
+
+ if (!ext4_bcache_test_flag(buf, BC_UPTODATE))
+ ext4_bcache_drop_buf(bc, buf);
+
+ }
+
b->lb_id = 0;
b->data = 0;
- b->cache_id = 0;
b->uptodate = false;
+ b->dirty = false;
return EOK;
}
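Taken together, ext4_bcache_alloc()/ext4_bcache_free() now form a lookup-or-allocate/release pair over malloc'd bufs. A hedged end-to-end sketch of the lifecycle, using only the API in this patch (demo_roundtrip is hypothetical, and the 8-block/1024-byte geometry and lba 1234 are arbitrary):

#include "ext4_errno.h"
#include "ext4_bcache.h"

static int demo_roundtrip(void)
{
	struct ext4_bcache bc;
	struct ext4_block b = EXT4_BLOCK_ZERO();
	bool is_new;
	int r;

	r = ext4_bcache_init_dynamic(&bc, 8, 1024);
	if (r != EOK)
		return r;

	b.lb_id = 1234;
	r = ext4_bcache_alloc(&bc, &b, &is_new);
	if (r == EOK) {
		/* is_new == true here: the caller must populate b.data
		 * (normally by reading the block) and mark it up to date.
		 * Since this sketch never does, the free below sees
		 * refctr == 0 with BC_UPTODATE clear and drops the buf
		 * straight away, so nothing is leaked. */
		ext4_bcache_free(&bc, &b, 0);
	}
	return ext4_bcache_fini_dynamic(&bc);
}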
@@ -213,7 +239,7 @@
bool ext4_bcache_is_full(struct ext4_bcache *bc)
{
- return (bc->cnt == bc->ref_blocks);
+ return (bc->cnt <= bc->ref_blocks);
}
/**
--- a/lwext4/ext4_bcache.h
+++ b/lwext4/ext4_bcache.h
@@ -45,9 +45,11 @@
#include <stdint.h>
#include <stdbool.h>
+#include "tree.h"
+#include "queue.h"
#define EXT4_BLOCK_ZERO() \
- {.uptodate = 0, .dirty = 0, .lb_id = 0, .cache_id = 0, .data = 0}
+ {.uptodate = 0, .dirty = 0, .lb_id = 0, .data = 0}
/**@brief Single block descriptor*/
struct ext4_block {
@@ -60,13 +62,43 @@
/**@brief Logical block ID*/
uint64_t lb_id;
- /**@brief Cache id*/
- uint32_t cache_id;
+ /**@brief Buffer */
+ struct ext4_buf *buf;
/**@brief Data buffer.*/
uint8_t *data;
};
+/**@brief Single buffer (cached block) descriptor*/
+struct ext4_buf {
+ /**@brief Flags*/
+ int flags;
+
+ /**@brief Logical block address*/
+ uint64_t lba;
+
+ /**@brief Data buffer.*/
+ uint8_t *data;
+
+ /**@brief LRU priority. (unused) */
+ uint32_t lru_prio;
+
+ /**@brief LRU id.*/
+ uint32_t lru_id;
+
+ /**@brief Reference counter*/
+ uint32_t refctr;
+
+ /**@brief LBA tree node*/
+ RB_ENTRY(ext4_buf) lba_node;
+
+ /**@brief LRU tree node*/
+ RB_ENTRY(ext4_buf) lru_node;
+
+ /**@brief Dirty list node*/
+ SLIST_ENTRY(ext4_buf) dirty_node;
+};
+
/**@brief Block cache descriptor*/
struct ext4_bcache {
@@ -79,29 +111,23 @@
/**@brief Least recently used counter*/
uint32_t lru_ctr;
- /**@brief Reference count table*/
- uint32_t refctr[CONFIG_BLOCK_DEV_CACHE_SIZE];
+ /**@brief Writeback free delay mode*/
+ uint8_t free_delay;
- /**@brief Last recently used ID table*/
- uint32_t lru_id[CONFIG_BLOCK_DEV_CACHE_SIZE];
-
- /**@brief Writeback free delay mode table*/
- uint8_t free_delay[CONFIG_BLOCK_DEV_CACHE_SIZE];
-
- /**@brief Logical block table*/
- uint64_t lba[CONFIG_BLOCK_DEV_CACHE_SIZE];
-
- /**@brief Flags*/
- int flags[CONFIG_BLOCK_DEV_CACHE_SIZE];
-
- /**@brief Cache data buffers*/
- uint8_t *data;
-
/**@brief Currently referenced datablocks*/
uint32_t ref_blocks;
/**@brief Maximum referenced datablocks*/
uint32_t max_ref_blocks;
+
+ /**@brief A tree holding all bufs*/
+ RB_HEAD(ext4_buf_lba, ext4_buf) lba_root;
+
+ /**@brief A tree holding unreferenced bufs*/
+ RB_HEAD(ext4_buf_lru, ext4_buf) lru_root;
+
+ /**@brief A singly-linked list holding dirty buffers*/
+ SLIST_HEAD(ext4_buf_dirty, ext4_buf) dirty_list;
};
enum bcache_state_bits {
@@ -109,23 +135,21 @@
BC_DIRTY
};
-#define ext4_bcache_set_flag(bc, id, b) \
- (bc)->flags[id] |= 1 << (b)
+#define ext4_bcache_set_flag(buf, b) \
+ (buf)->flags |= 1 << (b)
-#define ext4_bcache_clear_flag(bc, id, b) \
- (bc)->flags[id] &= ~(1 << (b))
+#define ext4_bcache_clear_flag(buf, b) \
+ (buf)->flags &= ~(1 << (b))
-#define ext4_bcache_test_flag(bc, id, b) \
- (((bc)->flags[id] & (1 << (b))) >> (b))
+#define ext4_bcache_test_flag(buf, b) \
+ (((buf)->flags & (1 << (b))) >> (b))
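The flag helpers now take the buf itself instead of a (cache, index) pair; they are ordinary bit-flag macros. A small illustration (demo_flags is hypothetical):

#include <assert.h>

static void demo_flags(struct ext4_buf *buf)
{
	ext4_bcache_set_flag(buf, BC_DIRTY);
	assert(ext4_bcache_test_flag(buf, BC_DIRTY) == 1); /* shifted back to 0/1 */
	ext4_bcache_clear_flag(buf, BC_DIRTY);
	assert(ext4_bcache_test_flag(buf, BC_DIRTY) == 0);
}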
/**@brief Static initializer of block cache structure.*/
#define EXT4_BCACHE_STATIC_INSTANCE(__name, __cnt, __itemsize) \
- static uint8_t __name##_data[(__cnt) * (__itemsize)]; \
static struct ext4_bcache __name = { \
.cnt = __cnt, \
.itemsize = __itemsize, \
.lru_ctr = 0, \
- .data = __name##_data, \
}
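With the per-cache data array gone, the static initializer only records the geometry; buffers are malloc'd per block on demand. Usage stays a one-liner (the name and item size are illustrative), and static storage zero-initializes the RB_HEAD and SLIST_HEAD members, which is exactly their empty state:

EXT4_BCACHE_STATIC_INSTANCE(demo_bc, CONFIG_BLOCK_DEV_CACHE_SIZE, 1024);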
/**@brief Dynamic initialization of block cache.
@@ -140,6 +164,16 @@
* @param bc block cache descriptor
* @return standard error code*/
int ext4_bcache_fini_dynamic(struct ext4_bcache *bc);
+
+/**@brief Get a buffer with the lowest LRU counter in bcache.
+ * @param bc block cache descriptor
+ * @return buffer with the lowest LRU counter*/
+struct ext4_buf *ext4_buf_lowest_lru(struct ext4_bcache *bc);
+
+/**@brief Drop unreferenced buffer from bcache.
+ * @param bc block cache descriptor
+ * @param buf buffer*/
+void ext4_bcache_drop_buf(struct ext4_bcache *bc, struct ext4_buf *buf);
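These two calls form the eviction API consumed by ext4_block_cache_shake() in ext4_blockdev.c. A hedged sketch of how a caller might combine them, dropping clean unreferenced buffers oldest-first (demo_evict_clean is hypothetical; it stops at the first dirty buf, since flushing is the blockdev layer's job):

static void demo_evict_clean(struct ext4_bcache *bc)
{
	struct ext4_buf *buf;

	/* Oldest (lowest lru_id) unreferenced buf first. */
	while ((buf = ext4_buf_lowest_lru(bc)) != NULL) {
		if (ext4_bcache_test_flag(buf, BC_DIRTY))
			break;	/* leave dirty bufs to be flushed */
		ext4_bcache_drop_buf(bc, buf);
	}
}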
/**@brief Allocate block from block cache memory.
* Unreferenced block allocation is based on LRU
--- a/lwext4/ext4_blockdev.c
+++ b/lwext4/ext4_blockdev.c
@@ -85,10 +85,50 @@
return bdev->close(bdev);
}
+static int
+ext4_block_flush_buf(struct ext4_blockdev *bdev, struct ext4_buf *buf)
+{
+ int r;
+ struct ext4_bcache *bc = bdev->bc;
+ /*Only unreferenced buffers may be flushed.*/
+ ext4_assert(!buf->refctr);
+ if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
+ r = ext4_blocks_set_direct(bdev, buf->data, buf->lba, 1);
+ if (r)
+ return r;
+
+ SLIST_REMOVE(&bc->dirty_list,
+ buf,
+ ext4_buf,
+ dirty_node);
+ ext4_bcache_clear_flag(buf, BC_DIRTY);
+ }
+ return EOK;
+}
+
+int ext4_block_cache_shake(struct ext4_blockdev *bdev)
+{
+ struct ext4_buf *buf;
+ while (!RB_EMPTY(&bdev->bc->lru_root) &&
+ ext4_bcache_is_full(bdev->bc)) {
+
+ buf = ext4_buf_lowest_lru(bdev->bc);
+ ext4_assert(buf);
+ if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
+ int r = ext4_block_flush_buf(bdev, buf);
+ if (r != EOK)
+ return r;
+
+ }
+
+ ext4_bcache_drop_buf(bdev->bc, buf);
+ }
+ return EOK;
+}
+
int ext4_block_get_noread(struct ext4_blockdev *bdev, struct ext4_block *b,
uint64_t lba)
{
- uint32_t i;
bool is_new;
int r;
@@ -103,46 +143,11 @@
b->dirty = 0;
b->lb_id = lba;
- /*If cache is full we have to flush it anyway :(*/
- if (ext4_bcache_is_full(bdev->bc) && bdev->cache_write_back) {
+ /*If cache is full we have to (flush and) drop it anyway :(*/
+ r = ext4_block_cache_shake(bdev);
+ if (r != EOK)
+ return r;
- uint32_t free_candidate = bdev->bc->cnt;
- uint32_t min_lru = 0xFFFFFFFF;
-
- for (i = 0; i < bdev->bc->cnt; ++i) {
- /*Check if buffer free was delayed.*/
- if (!bdev->bc->free_delay[i])
- continue;
-
- /*Check reference counter.*/
- if (bdev->bc->refctr[i])
- continue;
-
- if (bdev->bc->lru_id[i] < min_lru) {
- min_lru = bdev->bc->lru_id[i];
- free_candidate = i;
- continue;
- }
- }
-
- if (free_candidate < bdev->bc->cnt) {
- /*Buffer free was delayed and have no reference. Flush
- * it.*/
- r = ext4_blocks_set_direct(
- bdev, bdev->bc->data +
- bdev->bc->itemsize * free_candidate,
- bdev->bc->lba[free_candidate], 1);
- if (r != EOK)
- return r;
-
- /*No delayed anymore*/
- bdev->bc->free_delay[free_candidate] = 0;
-
- /*Reduce reference counter*/
- bdev->bc->ref_blocks--;
- }
- }
-
r = ext4_bcache_alloc(bdev->bc, b, &is_new);
if (r != EOK)
return r;
@@ -181,7 +186,7 @@
/* Mark buffer up-to-date, since fresh
* data has just been read from the physical device. */
- ext4_bcache_set_flag(bdev->bc, b->cache_id, BC_UPTODATE);
+ ext4_bcache_set_flag(b->buf, BC_UPTODATE);
b->uptodate = true;
bdev->bread_ctr++;
return EOK;
@@ -194,23 +199,11 @@
int r;
ext4_assert(bdev && b);
+ ext4_assert(b->buf);
if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
return EIO;
- /*Buffer is not marked dirty and is stale*/
- if (!b->uptodate && !b->dirty)
- ext4_bcache_clear_flag(bdev->bc, b->cache_id, BC_UPTODATE);
-
- /*No need to write.*/
- if (!b->dirty &&
- !ext4_bcache_test_flag(bdev->bc, b->cache_id, BC_DIRTY)) {
- ext4_bcache_free(bdev->bc, b, 0);
- return EOK;
- }
- /* Data is valid, so mark buffer up-to-date. */
- ext4_bcache_set_flag(bdev->bc, b->cache_id, BC_UPTODATE);
-
/*Free cache delay mode*/
if (bdev->cache_write_back) {
@@ -218,25 +211,28 @@
return ext4_bcache_free(bdev->bc, b, bdev->cache_write_back);
}
- if (bdev->bc->refctr[b->cache_id] > 1) {
- ext4_bcache_set_flag(bdev->bc, b->cache_id, BC_DIRTY);
+ if (b->buf->refctr > 1)
return ext4_bcache_free(bdev->bc, b, 0);
- }
- pba = (b->lb_id * bdev->lg_bsize) / bdev->ph_bsize;
- pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
+ /*We handle the dirty flag ourselves.*/
+ if (ext4_bcache_test_flag(b->buf, BC_DIRTY) || b->dirty) {
+ b->uptodate = true;
+ ext4_bcache_set_flag(b->buf, BC_UPTODATE);
- r = bdev->bwrite(bdev, b->data, pba, pb_cnt);
- ext4_bcache_clear_flag(bdev->bc, b->cache_id, BC_DIRTY);
- if (r != EOK) {
+ pba = (b->lb_id * bdev->lg_bsize) / bdev->ph_bsize;
+ pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
+
+ r = bdev->bwrite(bdev, b->data, pba, pb_cnt);
+ ext4_bcache_clear_flag(b->buf, BC_DIRTY);
+ if (r != EOK) {
+ b->dirty = true;
+ ext4_bcache_free(bdev->bc, b, 0);
+ return r;
+ }
+
b->dirty = false;
- ext4_bcache_clear_flag(bdev->bc, b->cache_id, BC_UPTODATE);
- ext4_bcache_free(bdev->bc, b, 0);
- return r;
+ bdev->bwrite_ctr++;
}
-
- bdev->bwrite_ctr++;
- b->dirty = false;
ext4_bcache_free(bdev->bc, b, 0);
return EOK;
}
@@ -412,7 +408,7 @@
int ext4_block_cache_write_back(struct ext4_blockdev *bdev, uint8_t on_off)
{
int r;
- uint32_t i;
+ struct ext4_buf *buf;
if (on_off)
bdev->cache_write_back++;
@@ -420,35 +416,19 @@
if (!on_off && bdev->cache_write_back)
bdev->cache_write_back--;
-
if (bdev->cache_write_back)
return EOK;
/*Flush all delayed cache blocks*/
- for (i = 0; i < bdev->bc->cnt; ++i) {
-
- /*Check if buffer free was delayed.*/
- if (!bdev->bc->free_delay[i])
- continue;
-
- /*Check reference counter.*/
- if (bdev->bc->refctr[i])
- continue;
-
- /*Buffer free was delayed and have no reference. Flush
- * it.*/
- r = ext4_blocks_set_direct(bdev, bdev->bc->data +
- bdev->bc->itemsize * i, bdev->bc->lba[i], 1);
+ while (!SLIST_EMPTY(&bdev->bc->dirty_list)) {
+
+ buf = SLIST_FIRST(&bdev->bc->dirty_list);
+ ext4_assert(buf);
+ r = ext4_block_flush_buf(bdev, buf);
if (r != EOK)
return r;
- /*No delayed anymore*/
- bdev->bc->free_delay[i] = 0;
-
- /*Reduce reference counter*/
- bdev->bc->ref_blocks--;
}
-
return EOK;
}
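ext4_block_cache_write_back() is reference-counted; the dirty list is only flushed when the last write-back reference is dropped. A hedged sketch of the intended batching pattern (demo_batched_writes is hypothetical):

static int demo_batched_writes(struct ext4_blockdev *bdev)
{
	int r = ext4_block_cache_write_back(bdev, 1); /* defer flushes */
	if (r != EOK)
		return r;

	/* ... ext4_block_get()/ext4_block_set() calls go here; dirty
	 * bufs accumulate on bc->dirty_list instead of hitting the
	 * device on every release ... */

	/* Dropping the last reference walks dirty_list and writes each
	 * delayed buffer out via ext4_block_flush_buf(). */
	return ext4_block_cache_write_back(bdev, 0);
}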
--- /dev/null
+++ b/lwext4/queue.h
@@ -1,0 +1,702 @@
+/*-
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ext4_config.h"
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may be traversed in either direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ * SLIST LIST STAILQ TAILQ
+ * _HEAD + + + +
+ * _HEAD_INITIALIZER + + + +
+ * _ENTRY + + + +
+ * _INIT + + + +
+ * _EMPTY + + + +
+ * _FIRST + + + +
+ * _NEXT + + + +
+ * _PREV - + - +
+ * _LAST - - + +
+ * _FOREACH + + + +
+ * _FOREACH_FROM + + + +
+ * _FOREACH_SAFE + + + +
+ * _FOREACH_FROM_SAFE + + + +
+ * _FOREACH_REVERSE - - - +
+ * _FOREACH_REVERSE_FROM - - - +
+ * _FOREACH_REVERSE_SAFE - - - +
+ * _FOREACH_REVERSE_FROM_SAFE - - - +
+ * _INSERT_HEAD + + + +
+ * _INSERT_BEFORE - + - +
+ * _INSERT_AFTER + + + +
+ * _INSERT_TAIL - - + +
+ * _CONCAT - - + +
+ * _REMOVE_AFTER + - + -
+ * _REMOVE_HEAD + - + -
+ * _REMOVE + + + +
+ * _SWAP + + + +
+ *
+ */
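Of the four families this header provides, the patch itself only uses SLIST (for bc->dirty_list). A minimal stand-alone example of that family, assuming this queue.h (and the ext4_config.h it includes) is on the include path; the item names are illustrative:

#include <stdio.h>
#include "queue.h"

struct item {
	int v;
	SLIST_ENTRY(item) link;
};

SLIST_HEAD(item_list, item);

int main(void)
{
	struct item_list head = SLIST_HEAD_INITIALIZER(head);
	struct item a = { .v = 1 }, b = { .v = 2 };

	SLIST_INSERT_HEAD(&head, &a, link);
	SLIST_INSERT_HEAD(&head, &b, link);	/* list is now: b, a */

	struct item *it;
	SLIST_FOREACH(it, &head, link)
		printf("%d\n", it->v);		/* prints 2, then 1 */

	SLIST_REMOVE(&head, &a, item, link);	/* O(n) arbitrary removal */
	return 0;
}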
+#ifdef QUEUE_MACRO_DEBUG
+/* Store the last 2 places the queue element or head was altered */
+struct qm_trace {
+ unsigned long lastline;
+ unsigned long prevline;
+ const char *lastfile;
+ const char *prevfile;
+};
+
+#define TRACEBUF struct qm_trace trace;
+#define TRACEBUF_INITIALIZER { __LINE__, 0, __FILE__, NULL } ,
+#define TRASHIT(x) do {(x) = (void *)-1;} while (0)
+#define QMD_SAVELINK(name, link) void **name = (void *)&(link)
+
+#define QMD_TRACE_HEAD(head) do { \
+ (head)->trace.prevline = (head)->trace.lastline; \
+ (head)->trace.prevfile = (head)->trace.lastfile; \
+ (head)->trace.lastline = __LINE__; \
+ (head)->trace.lastfile = __FILE__; \
+} while (0)
+
+#define QMD_TRACE_ELEM(elem) do { \
+ (elem)->trace.prevline = (elem)->trace.lastline; \
+ (elem)->trace.prevfile = (elem)->trace.lastfile; \
+ (elem)->trace.lastline = __LINE__; \
+ (elem)->trace.lastfile = __FILE__; \
+} while (0)
+
+#else
+#define QMD_TRACE_ELEM(elem)
+#define QMD_TRACE_HEAD(head)
+#define QMD_SAVELINK(name, link)
+#define TRACEBUF
+#define TRACEBUF_INITIALIZER
+#define TRASHIT(x)
+#endif /* QUEUE_MACRO_DEBUG */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+
+#define SLIST_FIRST(head) ((head)->slh_first)
+
+#define SLIST_FOREACH(var, head, field) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var); \
+ (var) = SLIST_NEXT((var), field))
+
+#define SLIST_FOREACH_FROM(var, head, field) \
+ for ((var) = ((var) ? (var) : SLIST_FIRST((head))); \
+ (var); \
+ (var) = SLIST_NEXT((var), field))
+
+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define SLIST_FOREACH_FROM_SAFE(var, head, field, tvar) \
+ for ((var) = ((var) ? (var) : SLIST_FIRST((head))); \
+ (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
+ for ((varp) = &SLIST_FIRST((head)); \
+ ((var) = *(varp)) != NULL; \
+ (varp) = &SLIST_NEXT((var), field))
+
+#define SLIST_INIT(head) do { \
+ SLIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
+ SLIST_NEXT((slistelm), field) = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
+ SLIST_FIRST((head)) = (elm); \
+} while (0)
+
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.sle_next); \
+ if (SLIST_FIRST((head)) == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = SLIST_FIRST((head)); \
+ while (SLIST_NEXT(curelm, field) != (elm)) \
+ curelm = SLIST_NEXT(curelm, field); \
+ SLIST_REMOVE_AFTER(curelm, field); \
+ } \
+ TRASHIT(*oldnext); \
+} while (0)
+
+#define SLIST_REMOVE_AFTER(elm, field) do { \
+ SLIST_NEXT(elm, field) = \
+ SLIST_NEXT(SLIST_NEXT(elm, field), field); \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
+} while (0)
+
+#define SLIST_SWAP(head1, head2, type) do { \
+ struct type *swap_first = SLIST_FIRST(head1); \
+ SLIST_FIRST(head1) = SLIST_FIRST(head2); \
+ SLIST_FIRST(head2) = swap_first; \
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first;/* first element */ \
+ struct type **stqh_last;/* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_CONCAT(head1, head2) do { \
+ if (!STAILQ_EMPTY((head2))) { \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_INIT((head2)); \
+ } \
+} while (0)
+
+#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
+
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for((var) = STAILQ_FIRST((head)); \
+ (var); \
+ (var) = STAILQ_NEXT((var), field))
+
+#define STAILQ_FOREACH_FROM(var, head, field) \
+ for ((var) = ((var) ? (var) : STAILQ_FIRST((head))); \
+ (var); \
+ (var) = STAILQ_NEXT((var), field))
+
+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = STAILQ_FIRST((head)); \
+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define STAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \
+ for ((var) = ((var) ? (var) : STAILQ_FIRST((head))); \
+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define STAILQ_INIT(head) do { \
+ STAILQ_FIRST((head)) = NULL; \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_NEXT((tqelm), field) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+ STAILQ_FIRST((head)) = (elm); \
+} while (0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ STAILQ_NEXT((elm), field) = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_LAST(head, type, field) \
+ (STAILQ_EMPTY((head)) ? NULL : \
+ __containerof((head)->stqh_last, struct type, field.stqe_next))
+
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.stqe_next); \
+ if (STAILQ_FIRST((head)) == (elm)) { \
+ STAILQ_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = STAILQ_FIRST((head)); \
+ while (STAILQ_NEXT(curelm, field) != (elm)) \
+ curelm = STAILQ_NEXT(curelm, field); \
+ STAILQ_REMOVE_AFTER(head, curelm, field); \
+ } \
+ TRASHIT(*oldnext); \
+} while (0)
+
+#define STAILQ_REMOVE_AFTER(head, elm, field) do { \
+ if ((STAILQ_NEXT(elm, field) = \
+ STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_NEXT((elm), field); \
+} while (0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if ((STAILQ_FIRST((head)) = \
+ STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
+ (head)->stqh_last = &STAILQ_FIRST((head)); \
+} while (0)
+
+#define STAILQ_SWAP(head1, head2, type) do { \
+ struct type *swap_first = STAILQ_FIRST(head1); \
+ struct type **swap_last = (head1)->stqh_last; \
+ STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_FIRST(head2) = swap_first; \
+ (head2)->stqh_last = swap_last; \
+ if (STAILQ_EMPTY(head1)) \
+ (head1)->stqh_last = &STAILQ_FIRST(head1); \
+ if (STAILQ_EMPTY(head2)) \
+ (head2)->stqh_last = &STAILQ_FIRST(head2); \
+} while (0)
+
+
+/*
+ * List declarations.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List functions.
+ */
+
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define QMD_LIST_CHECK_HEAD(head, field) do { \
+ if (LIST_FIRST((head)) != NULL && \
+ LIST_FIRST((head))->field.le_prev != \
+ &LIST_FIRST((head))) \
+ panic("Bad list head %p first->prev != head", (head)); \
+} while (0)
+
+#define QMD_LIST_CHECK_NEXT(elm, field) do { \
+ if (LIST_NEXT((elm), field) != NULL && \
+ LIST_NEXT((elm), field)->field.le_prev != \
+ &((elm)->field.le_next)) \
+ panic("Bad link elm %p next->prev != elm", (elm)); \
+} while (0)
+
+#define QMD_LIST_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.le_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (elm)); \
+} while (0)
+#else
+#define QMD_LIST_CHECK_HEAD(head, field)
+#define QMD_LIST_CHECK_NEXT(elm, field)
+#define QMD_LIST_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+
+#define LIST_FIRST(head) ((head)->lh_first)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = LIST_FIRST((head)); \
+ (var); \
+ (var) = LIST_NEXT((var), field))
+
+#define LIST_FOREACH_FROM(var, head, field) \
+ for ((var) = ((var) ? (var) : LIST_FIRST((head))); \
+ (var); \
+ (var) = LIST_NEXT((var), field))
+
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = LIST_FIRST((head)); \
+ (var) && ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define LIST_FOREACH_FROM_SAFE(var, head, field, tvar) \
+ for ((var) = ((var) ? (var) : LIST_FIRST((head))); \
+ (var) && ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define LIST_INIT(head) do { \
+ LIST_FIRST((head)) = NULL; \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ QMD_LIST_CHECK_NEXT(listelm, field); \
+ if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+ LIST_NEXT((listelm), field)->field.le_prev = \
+ &LIST_NEXT((elm), field); \
+ LIST_NEXT((listelm), field) = (elm); \
+ (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_LIST_CHECK_PREV(listelm, field); \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ LIST_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ QMD_LIST_CHECK_HEAD((head), field); \
+ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
+ LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+ LIST_FIRST((head)) = (elm); \
+ (elm)->field.le_prev = &LIST_FIRST((head)); \
+} while (0)
+
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_PREV(elm, head, type, field) \
+ ((elm)->field.le_prev == &LIST_FIRST((head)) ? NULL : \
+ __containerof((elm)->field.le_prev, struct type, field.le_next))
+
+#define LIST_REMOVE(elm, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.le_next); \
+ QMD_SAVELINK(oldprev, (elm)->field.le_prev); \
+ QMD_LIST_CHECK_NEXT(elm, field); \
+ QMD_LIST_CHECK_PREV(elm, field); \
+ if (LIST_NEXT((elm), field) != NULL) \
+ LIST_NEXT((elm), field)->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = LIST_NEXT((elm), field); \
+ TRASHIT(*oldnext); \
+ TRASHIT(*oldprev); \
+} while (0)
+
+#define LIST_SWAP(head1, head2, type, field) do { \
+ struct type *swap_tmp = LIST_FIRST((head1)); \
+ LIST_FIRST((head1)) = LIST_FIRST((head2)); \
+ LIST_FIRST((head2)) = swap_tmp; \
+ if ((swap_tmp = LIST_FIRST((head1))) != NULL) \
+ swap_tmp->field.le_prev = &LIST_FIRST((head1)); \
+ if ((swap_tmp = LIST_FIRST((head2))) != NULL) \
+ swap_tmp->field.le_prev = &LIST_FIRST((head2)); \
+} while (0)
+
+/*
+ * Tail queue declarations.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+ TRACEBUF \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first, TRACEBUF_INITIALIZER }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+ TRACEBUF \
+}
+
+/*
+ * Tail queue functions.
+ */
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define QMD_TAILQ_CHECK_HEAD(head, field) do { \
+ if (!TAILQ_EMPTY(head) && \
+ TAILQ_FIRST((head))->field.tqe_prev != \
+ &TAILQ_FIRST((head))) \
+ panic("Bad tailq head %p first->prev != head", (head)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_TAIL(head, field) do { \
+ if (*(head)->tqh_last != NULL) \
+ panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_NEXT(elm, field) do { \
+ if (TAILQ_NEXT((elm), field) != NULL && \
+ TAILQ_NEXT((elm), field)->field.tqe_prev != \
+ &((elm)->field.tqe_next)) \
+ panic("Bad link elm %p next->prev != elm", (elm)); \
+} while (0)
+
+#define QMD_TAILQ_CHECK_PREV(elm, field) do { \
+ if (*(elm)->field.tqe_prev != (elm)) \
+ panic("Bad link elm %p prev->next != elm", (elm)); \
+} while (0)
+#else
+#define QMD_TAILQ_CHECK_HEAD(head, field)
+#define QMD_TAILQ_CHECK_TAIL(head, headname)
+#define QMD_TAILQ_CHECK_NEXT(elm, field)
+#define QMD_TAILQ_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define TAILQ_CONCAT(head1, head2, field) do { \
+ if (!TAILQ_EMPTY(head2)) { \
+ *(head1)->tqh_last = (head2)->tqh_first; \
+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ TAILQ_INIT((head2)); \
+ QMD_TRACE_HEAD(head1); \
+ QMD_TRACE_HEAD(head2); \
+ } \
+} while (0)
+
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var); \
+ (var) = TAILQ_NEXT((var), field))
+
+#define TAILQ_FOREACH_FROM(var, head, field) \
+ for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \
+ (var); \
+ (var) = TAILQ_NEXT((var), field))
+
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = TAILQ_FIRST((head)); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_FOREACH_FROM_SAFE(var, head, field, tvar) \
+ for ((var) = ((var) ? (var) : TAILQ_FIRST((head))); \
+ (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var); \
+ (var) = TAILQ_PREV((var), headname, field))
+
+#define TAILQ_FOREACH_REVERSE_FROM(var, head, headname, field) \
+ for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname)); \
+ (var); \
+ (var) = TAILQ_PREV((var), headname, field))
+
+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_FOREACH_REVERSE_FROM_SAFE(var, head, headname, field, tvar) \
+ for ((var) = ((var) ? (var) : TAILQ_LAST((head), headname)); \
+ (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
+ (var) = (tvar))
+
+#define TAILQ_INIT(head) do { \
+ TAILQ_FIRST((head)) = NULL; \
+ (head)->tqh_last = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_NEXT(listelm, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else { \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ } \
+ TAILQ_NEXT((listelm), field) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&(listelm)->field); \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ QMD_TAILQ_CHECK_PREV(listelm, field); \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ TAILQ_NEXT((elm), field) = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+ QMD_TRACE_ELEM(&(listelm)->field); \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ QMD_TAILQ_CHECK_HEAD(head, field); \
+ if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
+ TAILQ_FIRST((head))->field.tqe_prev = \
+ &TAILQ_NEXT((elm), field); \
+ else \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ TAILQ_FIRST((head)) = (elm); \
+ (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ QMD_TAILQ_CHECK_TAIL(head, field); \
+ TAILQ_NEXT((elm), field) = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &TAILQ_NEXT((elm), field); \
+ QMD_TRACE_HEAD(head); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ QMD_SAVELINK(oldnext, (elm)->field.tqe_next); \
+ QMD_SAVELINK(oldprev, (elm)->field.tqe_prev); \
+ QMD_TAILQ_CHECK_NEXT(elm, field); \
+ QMD_TAILQ_CHECK_PREV(elm, field); \
+ if ((TAILQ_NEXT((elm), field)) != NULL) \
+ TAILQ_NEXT((elm), field)->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else { \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ QMD_TRACE_HEAD(head); \
+ } \
+ *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
+ TRASHIT(*oldnext); \
+ TRASHIT(*oldprev); \
+ QMD_TRACE_ELEM(&(elm)->field); \
+} while (0)
+
+#define TAILQ_SWAP(head1, head2, type, field) do { \
+ struct type *swap_first = (head1)->tqh_first; \
+ struct type **swap_last = (head1)->tqh_last; \
+ (head1)->tqh_first = (head2)->tqh_first; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ (head2)->tqh_first = swap_first; \
+ (head2)->tqh_last = swap_last; \
+ if ((swap_first = (head1)->tqh_first) != NULL) \
+ swap_first->field.tqe_prev = &(head1)->tqh_first; \
+ else \
+ (head1)->tqh_last = &(head1)->tqh_first; \
+ if ((swap_first = (head2)->tqh_first) != NULL) \
+ swap_first->field.tqe_prev = &(head2)->tqh_first; \
+ else \
+ (head2)->tqh_last = &(head2)->tqh_first; \
+} while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_SYS_QUEUE_H_ */