svn commit: r354354 - in stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs: . sys
Andriy Gapon
avg at FreeBSD.org
Tue Nov 5 07:11:13 UTC 2019
Author: avg
Date: Tue Nov 5 07:11:12 2019
New Revision: 354354
URL: https://svnweb.freebsd.org/changeset/base/354354
Log:
MFC r353565,r353568: MFV r353561: 10343 ZoL: Prefix all refcount functions with zfs_
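
  [Editorial illustration, not part of this commit: the change is a mechanical
  rename of the in-kernel refcount API from refcount_* to zfs_refcount_*, as
  the diff below shows.  A minimal sketch of how the renamed API is typically
  used follows; the my_obj structure, tag argument, and helper names are
  hypothetical, while the zfs_refcount_* calls themselves are the ones touched
  by this diff and declared in sys/refcount.h.]

  #include <sys/refcount.h>

  typedef struct my_obj {
          zfs_refcount_t  mo_holds;       /* hypothetical hold counter */
  } my_obj_t;

  static void
  my_obj_init(my_obj_t *mo)
  {
          /* formerly refcount_create() */
          zfs_refcount_create(&mo->mo_holds);
  }

  static void
  my_obj_hold(my_obj_t *mo, void *tag)
  {
          /* formerly refcount_add(); tag identifies the holder */
          (void) zfs_refcount_add(&mo->mo_holds, tag);
  }

  static void
  my_obj_rele(my_obj_t *mo, void *tag)
  {
          /* formerly refcount_remove(); returns the remaining count */
          if (zfs_refcount_remove(&mo->mo_holds, tag) == 0) {
                  /* last hold dropped; formerly refcount_destroy() */
                  zfs_refcount_destroy(&mo->mo_holds);
          }
  }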
Modified:
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/abd.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf_stats.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_destroy.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/abd.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dbuf.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_tx.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab_impl.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/refcount.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/rrwlock.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa_impl.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa_impl.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zap.h
stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
Directory Properties:
stable/12/ (props changed)
Modified: stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/abd.c
==============================================================================
--- stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/abd.c Tue Nov 5 07:06:45 2019 (r354353)
+++ stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/abd.c Tue Nov 5 07:11:12 2019 (r354354)
@@ -304,7 +304,7 @@ abd_alloc(size_t size, boolean_t is_metadata)
}
abd->abd_size = size;
abd->abd_parent = NULL;
- refcount_create(&abd->abd_children);
+ zfs_refcount_create(&abd->abd_children);
abd->abd_u.abd_scatter.abd_offset = 0;
abd->abd_u.abd_scatter.abd_chunk_size = zfs_abd_chunk_size;
@@ -331,7 +331,7 @@ abd_free_scatter(abd_t *abd)
abd_free_chunk(abd->abd_u.abd_scatter.abd_chunks[i]);
}
- refcount_destroy(&abd->abd_children);
+ zfs_refcount_destroy(&abd->abd_children);
ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
ABDSTAT_INCR(abdstat_scatter_chunk_waste,
@@ -358,7 +358,7 @@ abd_alloc_linear(size_t size, boolean_t is_metadata)
}
abd->abd_size = size;
abd->abd_parent = NULL;
- refcount_create(&abd->abd_children);
+ zfs_refcount_create(&abd->abd_children);
if (is_metadata) {
abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
@@ -381,7 +381,7 @@ abd_free_linear(abd_t *abd)
zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
}
- refcount_destroy(&abd->abd_children);
+ zfs_refcount_destroy(&abd->abd_children);
ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
@@ -487,8 +487,8 @@ abd_get_offset(abd_t *sabd, size_t off)
abd->abd_size = sabd->abd_size - off;
abd->abd_parent = sabd;
- refcount_create(&abd->abd_children);
- (void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
+ zfs_refcount_create(&abd->abd_children);
+ (void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
return (abd);
}
@@ -512,7 +512,7 @@ abd_get_from_buf(void *buf, size_t size)
abd->abd_flags = ABD_FLAG_LINEAR;
abd->abd_size = size;
abd->abd_parent = NULL;
- refcount_create(&abd->abd_children);
+ zfs_refcount_create(&abd->abd_children);
abd->abd_u.abd_linear.abd_buf = buf;
@@ -530,11 +530,11 @@ abd_put(abd_t *abd)
ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
if (abd->abd_parent != NULL) {
- (void) refcount_remove_many(&abd->abd_parent->abd_children,
+ (void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
abd->abd_size, abd);
}
- refcount_destroy(&abd->abd_children);
+ zfs_refcount_destroy(&abd->abd_children);
abd_free_struct(abd);
}
@@ -566,7 +566,7 @@ abd_borrow_buf(abd_t *abd, size_t n)
} else {
buf = zio_buf_alloc(n);
}
- (void) refcount_add_many(&abd->abd_children, n, buf);
+ (void) zfs_refcount_add_many(&abd->abd_children, n, buf);
return (buf);
}
@@ -598,7 +598,7 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
ASSERT0(abd_cmp_buf(abd, buf, n));
zio_buf_free(buf, n);
}
- (void) refcount_remove_many(&abd->abd_children, n, buf);
+ (void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
}
void
Modified: stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
==============================================================================
--- stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c Tue Nov 5 07:06:45 2019 (r354353)
+++ stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c Tue Nov 5 07:11:12 2019 (r354354)
@@ -538,12 +538,12 @@ typedef struct arc_state {
/*
* total amount of evictable data in this state
*/
- refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
+ zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
/*
* total amount of data in this state; this includes: evictable,
* non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
*/
- refcount_t arcs_size;
+ zfs_refcount_t arcs_size;
/*
* supports the "dbufs" kstat
*/
@@ -1158,7 +1158,7 @@ typedef struct l1arc_buf_hdr {
uint32_t b_l2_hits;
/* self protecting */
- refcount_t b_refcnt;
+ zfs_refcount_t b_refcnt;
arc_callback_t *b_acb;
abd_t *b_pabd;
@@ -1534,7 +1534,7 @@ struct l2arc_dev {
kmutex_t l2ad_mtx; /* lock for buffer list */
list_t l2ad_buflist; /* buffer list */
list_node_t l2ad_node; /* device list node */
- refcount_t l2ad_alloc; /* allocated bytes */
+ zfs_refcount_t l2ad_alloc; /* allocated bytes */
};
static list_t L2ARC_dev_list; /* device list */
@@ -1765,7 +1765,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
bzero(hdr, HDR_FULL_SIZE);
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
- refcount_create(&hdr->b_l1hdr.b_refcnt);
+ zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -1810,7 +1810,7 @@ hdr_full_dest(void *vbuf, void *unused)
ASSERT(HDR_EMPTY(hdr));
cv_destroy(&hdr->b_l1hdr.b_cv);
- refcount_destroy(&hdr->b_l1hdr.b_refcnt);
+ zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -2495,21 +2495,21 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
- (void) refcount_add_many(&state->arcs_esize[type],
+ (void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
ASSERT(!GHOST_STATE(state));
if (hdr->b_l1hdr.b_pabd != NULL) {
- (void) refcount_add_many(&state->arcs_esize[type],
+ (void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
- (void) refcount_add_many(&state->arcs_esize[type],
+ (void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
@@ -2530,21 +2530,21 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
- (void) refcount_remove_many(&state->arcs_esize[type],
+ (void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
ASSERT(!GHOST_STATE(state));
if (hdr->b_l1hdr.b_pabd != NULL) {
- (void) refcount_remove_many(&state->arcs_esize[type],
+ (void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
- (void) refcount_remove_many(&state->arcs_esize[type],
+ (void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
@@ -2561,13 +2561,13 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
ASSERT(HDR_HAS_L1HDR(hdr));
if (!MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
arc_state_t *state = hdr->b_l1hdr.b_state;
- if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
+ if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
(state != arc_anon)) {
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
@@ -2599,7 +2599,7 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lo
* arc_l2c_only counts as a ghost state so we don't need to explicitly
* check to prevent usage of the arc_l2c_only list.
*/
- if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
+ if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
@@ -2644,7 +2644,7 @@ arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int s
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
- abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
+ abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
@@ -2680,7 +2680,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
- refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
+ refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
bufcnt = hdr->b_l1hdr.b_bufcnt;
update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
} else {
@@ -2750,7 +2750,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
* the reference. As a result, we use the arc
* header pointer for the reference.
*/
- (void) refcount_add_many(&new_state->arcs_size,
+ (void) zfs_refcount_add_many(&new_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
} else {
@@ -2776,13 +2776,15 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
if (arc_buf_is_shared(buf))
continue;
- (void) refcount_add_many(&new_state->arcs_size,
+ (void) zfs_refcount_add_many(
+ &new_state->arcs_size,
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
if (hdr->b_l1hdr.b_pabd != NULL) {
- (void) refcount_add_many(&new_state->arcs_size,
+ (void) zfs_refcount_add_many(
+ &new_state->arcs_size,
arc_hdr_size(hdr), hdr);
} else {
ASSERT(GHOST_STATE(old_state));
@@ -2804,7 +2806,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
* header on the ghost state.
*/
- (void) refcount_remove_many(&old_state->arcs_size,
+ (void) zfs_refcount_remove_many(&old_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
} else {
uint32_t buffers = 0;
@@ -2829,13 +2831,13 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
if (arc_buf_is_shared(buf))
continue;
- (void) refcount_remove_many(
+ (void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_buf_size(buf),
buf);
}
ASSERT3U(bufcnt, ==, buffers);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
- (void) refcount_remove_many(
+ (void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_hdr_size(hdr), hdr);
}
}
@@ -3101,8 +3103,8 @@ arc_return_buf(arc_buf_t *buf, void *tag)
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
- (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
- (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+ (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
+ (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
@@ -3115,8 +3117,8 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
- (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
- (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
+ (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+ (void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
@@ -3143,13 +3145,13 @@ arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
- (void) refcount_remove_many(&state->arcs_esize[type],
+ (void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
- (void) refcount_remove_many(&state->arcs_size, size, hdr);
+ (void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
@@ -3179,7 +3181,7 @@ arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
- refcount_transfer_ownership(&state->arcs_size, buf, hdr);
+ zfs_refcount_transfer_ownership(&state->arcs_size, buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
@@ -3209,7 +3211,7 @@ arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
- refcount_transfer_ownership(&state->arcs_size, hdr, buf);
+ zfs_refcount_transfer_ownership(&state->arcs_size, hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_put(hdr->b_l1hdr.b_pabd);
@@ -3436,7 +3438,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsi
* it references and compressed arc enablement.
*/
arc_hdr_alloc_pabd(hdr, B_TRUE);
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
@@ -3538,8 +3540,10 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old,
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
- (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
- (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
+ (void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
+ hdr);
+ (void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr),
+ nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
@@ -3619,7 +3623,7 @@ arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
- (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
+ (void) zfs_refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
@@ -3629,7 +3633,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_buf == NULL ||
hdr->b_l1hdr.b_bufcnt > 0);
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -3793,7 +3797,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
return (bytes_evicted);
}
- ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+ ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
while (hdr->b_l1hdr.b_buf) {
arc_buf_t *buf = hdr->b_l1hdr.b_buf;
if (!mutex_tryenter(&buf->b_evict_lock)) {
@@ -4107,7 +4111,7 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_
{
uint64_t evicted = 0;
- while (refcount_count(&state->arcs_esize[type]) != 0) {
+ while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
if (!retry)
@@ -4130,7 +4134,7 @@ arc_prune_task(void *ptr)
if (func != NULL)
func(ap->p_adjust, ap->p_private);
- refcount_remove(&ap->p_refcnt, func);
+ zfs_refcount_remove(&ap->p_refcnt, func);
}
/*
@@ -4153,14 +4157,14 @@ arc_prune_async(int64_t adjust)
for (ap = list_head(&arc_prune_list); ap != NULL;
ap = list_next(&arc_prune_list, ap)) {
- if (refcount_count(&ap->p_refcnt) >= 2)
+ if (zfs_refcount_count(&ap->p_refcnt) >= 2)
continue;
- refcount_add(&ap->p_refcnt, ap->p_pfunc);
+ zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
ap->p_adjust = adjust;
if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
ap, TQ_SLEEP) == TASKQID_INVALID) {
- refcount_remove(&ap->p_refcnt, ap->p_pfunc);
+ zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
continue;
}
ARCSTAT_BUMP(arcstat_prune);
@@ -4182,8 +4186,9 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int6
{
int64_t delta;
- if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
- delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
+ if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
+ delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
+ bytes);
return (arc_evict_state(state, spa, delta, type));
}
@@ -4226,8 +4231,9 @@ restart:
*/
adjustmnt = meta_used - arc_meta_limit;
- if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
- delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
+ if (adjustmnt > 0 &&
+ zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
+ delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
adjustmnt);
total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
adjustmnt -= delta;
@@ -4243,8 +4249,9 @@ restart:
* simply decrement the amount of data evicted from the MRU.
*/
- if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
- delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
+ if (adjustmnt > 0 &&
+ zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
+ delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
adjustmnt);
total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
}
@@ -4252,17 +4259,17 @@ restart:
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
- refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
+ zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
- refcount_count(&arc_mru_ghost->arcs_esize[type]));
+ zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
adjustmnt -= delta;
}
if (adjustmnt > 0 &&
- refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
+ zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
- refcount_count(&arc_mfu_ghost->arcs_esize[type]));
+ zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
}
@@ -4311,8 +4318,8 @@ arc_adjust_meta_only(uint64_t meta_used)
* evict some from the MRU here, and some from the MFU below.
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
- (int64_t)(refcount_count(&arc_anon->arcs_size) +
- refcount_count(&arc_mru->arcs_size) - arc_p));
+ (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+ zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
@@ -4322,7 +4329,7 @@ arc_adjust_meta_only(uint64_t meta_used)
* space allotted to the MFU (which is defined as arc_c - arc_p).
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
- (int64_t)(refcount_count(&arc_mfu->arcs_size) -
+ (int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
(arc_c - arc_p)));
total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
@@ -4443,8 +4450,8 @@ arc_adjust(void)
* arc_p here, and then evict more from the MFU below.
*/
target = MIN((int64_t)(asize - arc_c),
- (int64_t)(refcount_count(&arc_anon->arcs_size) +
- refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
+ (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+ zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
/*
* If we're below arc_meta_min, always prefer to evict data.
@@ -4534,8 +4541,8 @@ arc_adjust(void)
* cache. The following logic enforces these limits on the ghost
* caches, and evicts from them as needed.
*/
- target = refcount_count(&arc_mru->arcs_size) +
- refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
+ target = zfs_refcount_count(&arc_mru->arcs_size) +
+ zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
@@ -4553,8 +4560,8 @@ arc_adjust(void)
* mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
* mru ghost + mfu ghost <= arc_c
*/
- target = refcount_count(&arc_mru_ghost->arcs_size) +
- refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
+ target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
+ zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
@@ -5069,8 +5076,8 @@ arc_adapt(int bytes, arc_state_t *state)
{
int mult;
uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
- int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
- int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
+ int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
+ int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
if (state == arc_l2c_only)
return;
@@ -5248,7 +5255,7 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, v
*/
if (!GHOST_STATE(state)) {
- (void) refcount_add_many(&state->arcs_size, size, tag);
+ (void) zfs_refcount_add_many(&state->arcs_size, size, tag);
/*
* If this is reached via arc_read, the link is
@@ -5260,8 +5267,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, v
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
- (void) refcount_add_many(&state->arcs_esize[type],
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ (void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
@@ -5271,8 +5278,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, v
*/
if (aggsum_upper_bound(&arc_size) < arc_c &&
hdr->b_l1hdr.b_state == arc_anon &&
- (refcount_count(&arc_anon->arcs_size) +
- refcount_count(&arc_mru->arcs_size) > arc_p))
+ (zfs_refcount_count(&arc_anon->arcs_size) +
+ zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
arc_p = MIN(arc_c, arc_p + size);
}
ARCSTAT_BUMP(arcstat_allocated);
@@ -5310,13 +5317,13 @@ arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size,
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
- (void) refcount_remove_many(&state->arcs_esize[type],
+ (void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
- (void) refcount_remove_many(&state->arcs_size, size, tag);
+ (void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
@@ -5363,7 +5370,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
* another prefetch (to make it less likely to be evicted).
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
- if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+ if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
/* link protected by hash lock */
ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
@@ -5404,7 +5411,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
new_state = arc_mru;
- if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
+ if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
@@ -5678,7 +5685,7 @@ arc_read_done(zio_t *zio)
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
if (no_zio_error) {
@@ -5689,7 +5696,7 @@ arc_read_done(zio_t *zio)
arc_change_state(arc_anon, hdr, hash_lock);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
- freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+ freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/*
@@ -5709,7 +5716,7 @@ arc_read_done(zio_t *zio)
* in the cache).
*/
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
- freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+ freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/* execute each callback and free its structure */
@@ -5875,7 +5882,7 @@ top:
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc == 0 || rc != ENOENT);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
- refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+ zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
@@ -5933,7 +5940,7 @@ top:
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
- ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
@@ -6154,10 +6161,10 @@ arc_add_prune_callback(arc_prune_func_t *func, void *p
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
- refcount_create(&p->p_refcnt);
+ zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
- refcount_add(&p->p_refcnt, &arc_prune_list);
+ zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
@@ -6170,15 +6177,15 @@ arc_remove_prune_callback(arc_prune_t *p)
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
- if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
+ if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait(arc_prune_taskq);
- ASSERT0(refcount_count(&p->p_refcnt));
- refcount_destroy(&p->p_refcnt);
+ ASSERT0(zfs_refcount_count(&p->p_refcnt));
+ zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
@@ -6221,7 +6228,7 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
- refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
+ zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
@@ -6264,7 +6271,7 @@ arc_release(arc_buf_t *buf, void *tag)
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT(HDR_EMPTY(hdr));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
- ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
+ ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
@@ -6292,7 +6299,7 @@ arc_release(arc_buf_t *buf, void *tag)
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
- ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
+ ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
@@ -6384,12 +6391,13 @@ arc_release(arc_buf_t *buf, void *tag)
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT3P(state, !=, arc_l2c_only);
- (void) refcount_remove_many(&state->arcs_size,
+ (void) zfs_refcount_remove_many(&state->arcs_size,
arc_buf_size(buf), buf);
- if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
+ if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
- (void) refcount_remove_many(&state->arcs_esize[type],
+ (void) zfs_refcount_remove_many(
+ &state->arcs_esize[type],
arc_buf_size(buf), buf);
}
@@ -6408,21 +6416,21 @@ arc_release(arc_buf_t *buf, void *tag)
nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(nhdr->b_l1hdr.b_bufcnt);
- ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
+ ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
nhdr->b_l1hdr.b_bufcnt = 1;
- (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
+ (void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
- (void) refcount_add_many(&arc_anon->arcs_size,
+ (void) zfs_refcount_add_many(&arc_anon->arcs_size,
arc_buf_size(buf), buf);
} else {
mutex_exit(&buf->b_evict_lock);
- ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
+ ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -6454,7 +6462,7 @@ arc_referenced(arc_buf_t *buf)
int referenced;
mutex_enter(&buf->b_evict_lock);
- referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
+ referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
mutex_exit(&buf->b_evict_lock);
return (referenced);
}
@@ -6469,7 +6477,7 @@ arc_write_ready(zio_t *zio)
uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp);
ASSERT(HDR_HAS_L1HDR(hdr));
- ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
+ ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
/*
@@ -6624,7 +6632,7 @@ arc_write_done(zio_t *zio)
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
- ASSERT(refcount_is_zero(
+ ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists, hash_lock);
mutex_exit(hash_lock);
@@ -6654,7 +6662,7 @@ arc_write_done(zio_t *zio)
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
}
- ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
callback->awcb_done(zio, buf, callback->awcb_private);
abd_put(zio->io_abd);
@@ -6800,7 +6808,7 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, ui
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
- anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
+ anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
arc_loaned_bytes), 0);
/*
@@ -6835,9 +6843,10 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, ui
anon_size > arc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
uint64_t meta_esize =
- refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_count(
+ &arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
- refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
arc_tempreserve >> 10, meta_esize >> 10,
@@ -6852,11 +6861,11 @@ static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
- size->value.ui64 = refcount_count(&state->arcs_size);
+ size->value.ui64 = zfs_refcount_count(&state->arcs_size);
evict_data->value.ui64 =
- refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
- refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
@@ -7025,25 +7034,25 @@ arc_state_init(void)
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
- refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
- refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
- refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
- refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
- refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
- refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
- refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
- refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
- refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
- refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
- refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
- refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
- refcount_create(&arc_anon->arcs_size);
- refcount_create(&arc_mru->arcs_size);
- refcount_create(&arc_mru_ghost->arcs_size);
- refcount_create(&arc_mfu->arcs_size);
- refcount_create(&arc_mfu_ghost->arcs_size);
- refcount_create(&arc_l2c_only->arcs_size);
+ zfs_refcount_create(&arc_anon->arcs_size);
+ zfs_refcount_create(&arc_mru->arcs_size);
+ zfs_refcount_create(&arc_mru_ghost->arcs_size);
+ zfs_refcount_create(&arc_mfu->arcs_size);
+ zfs_refcount_create(&arc_mfu_ghost->arcs_size);
+ zfs_refcount_create(&arc_l2c_only->arcs_size);
aggsum_init(&arc_meta_used, 0);
aggsum_init(&arc_size, 0);
@@ -7059,25 +7068,25 @@ arc_state_init(void)
static void
arc_state_fini(void)
{
- refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
- refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
- refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
- refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
- refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
- refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
- refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
- refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
- refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
- refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
- refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
- refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+ zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+ zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
- refcount_destroy(&arc_anon->arcs_size);
- refcount_destroy(&arc_mru->arcs_size);
- refcount_destroy(&arc_mru_ghost->arcs_size);
- refcount_destroy(&arc_mfu->arcs_size);
- refcount_destroy(&arc_mfu_ghost->arcs_size);
- refcount_destroy(&arc_l2c_only->arcs_size);
+ zfs_refcount_destroy(&arc_anon->arcs_size);
+ zfs_refcount_destroy(&arc_mru->arcs_size);
+ zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
+ zfs_refcount_destroy(&arc_mfu->arcs_size);
+ zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
+ zfs_refcount_destroy(&arc_l2c_only->arcs_size);
multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
@@ -7359,8 +7368,8 @@ arc_fini(void)
mutex_enter(&arc_prune_mtx);
while ((p = list_head(&arc_prune_list)) != NULL) {
list_remove(&arc_prune_list, p);
- refcount_remove(&p->p_refcnt, &arc_prune_list);
- refcount_destroy(&p->p_refcnt);
+ zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
+ zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
@@ -7787,7 +7796,7 @@ top:
ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
bytes_dropped += arc_hdr_size(hdr);
- (void) refcount_remove_many(&dev->l2ad_alloc,
+ (void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
@@ -8205,7 +8214,8 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
- (void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);
+ (void) zfs_refcount_add_many(&dev->l2ad_alloc, psize,
+ hdr);
/*
* Normally the L2ARC can use the hdr's data, but if
@@ -8439,7 +8449,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
- refcount_create(&adddev->l2ad_alloc);
+ zfs_refcount_create(&adddev->l2ad_alloc);
/*
* Add device to global list
@@ -8485,7 +8495,7 @@ l2arc_remove_vdev(vdev_t *vd)
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
mutex_destroy(&remdev->l2ad_mtx);
- refcount_destroy(&remdev->l2ad_alloc);
+ zfs_refcount_destroy(&remdev->l2ad_alloc);
kmem_free(remdev, sizeof (l2arc_dev_t));
}
Modified: stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
==============================================================================
--- stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c Tue Nov 5 07:06:45 2019 (r354353)
+++ stable/12/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c Tue Nov 5 07:11:12 2019 (r354354)
@@ -226,7 +226,7 @@ static boolean_t dbuf_evict_thread_exit;
*/
typedef struct dbuf_cache {
multilist_t *cache;
- refcount_t size;
+ zfs_refcount_t size;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
@@ -316,7 +316,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
multilist_link_init(&db->db_cache_link);
- refcount_create(&db->db_holds);
+ zfs_refcount_create(&db->db_holds);
return (0);
}
@@ -329,7 +329,7 @@ dbuf_dest(void *vdb, void *unused)
mutex_destroy(&db->db_mtx);
cv_destroy(&db->db_changed);
ASSERT(!multilist_link_active(&db->db_cache_link));
- refcount_destroy(&db->db_holds);
+ zfs_refcount_destroy(&db->db_holds);
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***