PERFORCE change 113349 for review
Todd Miller
millert at FreeBSD.org
Mon Jan 22 16:08:01 UTC 2007
http://perforce.freebsd.org/chv.cgi?CH=113349
Change 113349 by millert at millert_macbook on 2007/01/22 16:07:40
Use a single read/write lock for the avc cache instead of
multiple spinlocks, based on the locking we used in sedarwin78.
Affected files ...
.. //depot/projects/trustedbsd/sedarwin8/policies/sedarwin/sedarwin/avc/avc.c#18 edit
Differences ...
==== //depot/projects/trustedbsd/sedarwin8/policies/sedarwin/sedarwin/avc/avc.c#18 (text+ko) ====
@@ -109,8 +109,7 @@
struct avc_cache {
LIST_HEAD(, avc_node) slots[AVC_CACHE_SLOTS];
- lck_spin_t *slots_lock[AVC_CACHE_SLOTS];
- atomic_t lru_hint; /* LRU hint for reclaim scan */
+ u32 lru_hint; /* LRU hint for reclaim scan */
atomic_t active_nodes;
u32 latest_notif; /* latest revocation notification */
};
@@ -139,8 +138,11 @@
static lck_grp_t *avc_lck_grp;
-#define AVC_LOCK(n) lck_spin_lock(avc_cache.slots_lock[n])
-#define AVC_UNLOCK(n) lck_spin_unlock(avc_cache.slots_lock[n])
+static lck_rw_t *avc_lock;
+#define AVC_RDLOCK lck_rw_lock_shared(avc_lock)
+#define AVC_WRLOCK lck_rw_lock_exclusive(avc_lock)
+#define AVC_RDUNLOCK lck_rw_unlock_shared(avc_lock)
+#define AVC_WRUNLOCK lck_rw_unlock_exclusive(avc_lock)
static lck_spin_t *notif_lock;
#define NOTIF_LOCK lck_spin_lock(notif_lock)
@@ -269,12 +271,11 @@
/* allocate avc locks */
avc_log_lock = lck_spin_alloc_init(avc_lck_grp, avc_lck_attr);
+ avc_lock = lck_rw_alloc_init(avc_lck_grp, avc_lck_attr);
notif_lock = lck_spin_alloc_init(avc_lck_grp, avc_lck_attr);
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
LIST_INIT(&avc_cache.slots[i]);
- avc_cache.slots_lock[i] =
- lck_spin_alloc_init(avc_lck_grp, avc_lck_attr);
}
avc_cache.active_nodes = 0;
avc_cache.lru_hint = 0;
@@ -301,10 +302,11 @@
int i, chain_len, max_chain_len, slots_used;
struct avc_node *node;
+ AVC_RDLOCK;
+
slots_used = 0;
max_chain_len = 0;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- AVC_LOCK(i);
if (!LIST_EMPTY(&avc_cache.slots[i])) {
slots_used++;
chain_len = 0;
@@ -313,9 +315,10 @@
if (chain_len > max_chain_len)
max_chain_len = chain_len;
}
- AVC_UNLOCK(i);
}
+ AVC_RDUNLOCK;
+
return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
"longest chain: %d\n",
atomic_read(&avc_cache.active_nodes),
@@ -360,10 +363,12 @@
struct avc_node *node, *next;
int hvalue, try, ecx;
+ AVC_WRLOCK;
+
+ hvalue = avc_cache.lru_hint;
for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++ ) {
- hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
+ hvalue = (hvalue + 1) & (AVC_CACHE_SLOTS - 1);
- AVC_LOCK(hvalue);
for (node = LIST_FIRST(&avc_cache.slots[hvalue]);
node != NULL; node = next) {
next = LIST_NEXT(node, list);
@@ -372,15 +377,14 @@
avc_node_delete(node);
avc_cache_stats_incr(reclaims);
ecx++;
- if (ecx >= AVC_CACHE_RECLAIM) {
- AVC_UNLOCK(hvalue);
+ if (ecx >= AVC_CACHE_RECLAIM)
goto out;
- }
}
}
- AVC_UNLOCK(hvalue);
}
out:
+ avc_cache.lru_hint = hvalue;
+ AVC_WRUNLOCK;
return ecx;
}
@@ -411,17 +415,13 @@
memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd));
}
-/*
- * Note: returns with read lock held for hvalue.
- */
-static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass,
- int *hvaluep)
+static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
struct avc_node *node, *ret = NULL;
+ int hvalue;
- *hvaluep = avc_hash(ssid, tsid, tclass);
- AVC_LOCK(*hvaluep);
- LIST_FOREACH(node, &avc_cache.slots[*hvaluep], list) {
+ hvalue = avc_hash(ssid, tsid, tclass);
+ LIST_FOREACH(node, &avc_cache.slots[hvalue], list) {
if (ssid == node->ae.ssid &&
tclass == node->ae.tclass &&
tsid == node->ae.tsid) {
@@ -448,28 +448,26 @@
* @tsid: target security identifier
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
- * @hvaluep: cache slot of the node on success
*
* Look up an AVC entry that is valid for the
* @requested permissions between the SID pair
* (@ssid, @tsid), interpreting the permissions
* based on @tclass. If a valid AVC entry exists,
- * then this function return the avc_node and read locks its slot.
+ * then this function returns the avc_node.
* Otherwise, this function returns NULL.
*/
-static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested, int *hvaluep)
+static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested)
{
struct avc_node *node;
avc_cache_stats_incr(lookups);
- node = avc_search_node(ssid, tsid, tclass, hvaluep);
+ node = avc_search_node(ssid, tsid, tclass);
if (node && ((node->ae.avd.decided & requested) == requested)) {
avc_cache_stats_incr(hits);
goto out;
}
- AVC_UNLOCK(*hvaluep);
node = NULL;
avc_cache_stats_incr(misses);
out:
@@ -502,7 +500,6 @@
* @tsid: target security identifier
* @tclass: target security class
* @ae: AVC entry
- * @hvaluep: cache slot of the node on success
*
* Insert an AVC entry for the SID pair
* (@ssid, @tsid) and class @tclass.
@@ -511,32 +508,35 @@
* response to a security_compute_av() call. If the
* sequence number @ae->avd.seqno is not less than the latest
* revocation notification, then the function copies
- * the access vectors into a cache entry, returns (WRITE-locked)
+ * the access vectors into a cache entry, returns
* avc_node inserted. Otherwise, this function returns NULL.
*/
-static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae, int *hvaluep)
+static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
{
struct avc_node *pos, *node;
+ int hvalue;
if (avc_latest_notif_update(ae->avd.seqno, 1))
return NULL;
node = avc_alloc_node();
if (node) {
- *hvaluep = avc_hash(ssid, tsid, tclass);
+ hvalue = avc_hash(ssid, tsid, tclass);
avc_node_populate(node, ssid, tsid, tclass, ae);
- AVC_LOCK(*hvaluep);
+ AVC_WRLOCK;
- LIST_FOREACH(pos, &avc_cache.slots[*hvaluep], list) {
+ LIST_FOREACH(pos, &avc_cache.slots[hvalue], list) {
if (pos->ae.ssid == ssid &&
pos->ae.tsid == tsid &&
pos->ae.tclass == tclass) {
avc_node_replace(node, pos);
+ AVC_WRUNLOCK;
goto found;
}
}
- LIST_INSERT_HEAD(&avc_cache.slots[*hvaluep], node, list);
+ LIST_INSERT_HEAD(&avc_cache.slots[hvalue], node, list);
+ AVC_WRUNLOCK;
}
found:
return node;
@@ -834,9 +834,9 @@
goto out;
}
- /* Lock the target slot */
hvalue = avc_hash(ssid, tsid, tclass);
- AVC_LOCK(hvalue);
+
+ AVC_WRLOCK;
LIST_FOREACH(pos, &avc_cache.slots[hvalue], list){
if ( ssid==pos->ae.ssid &&
@@ -882,7 +882,7 @@
}
avc_node_replace(node, orig);
out_unlock:
- AVC_UNLOCK(hvalue);
+ AVC_WRUNLOCK;
out:
return rc;
}
@@ -897,14 +897,15 @@
int i, rc = 0, tmprc;
struct avc_node *node;
+ AVC_WRLOCK;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
- AVC_LOCK(i);
while ((node = LIST_FIRST(&avc_cache.slots[i])) != NULL)
avc_node_delete(node);
- AVC_UNLOCK(i);
}
+ AVC_WRUNLOCK;
+
for (c = avc_callbacks; c; c = c->next) {
if (c->events & AVC_CALLBACK_RESET) {
tmprc = c->callback(AVC_CALLBACK_RESET,
@@ -945,29 +946,28 @@
{
struct avc_node *node;
struct avc_entry entry, *p_ae;
- int hvalue, found, rc = 0;
+ int rc = 0;
u32 denied;
- node = avc_lookup(ssid, tsid, tclass, requested, &hvalue);
- found = node != NULL;
+ AVC_RDLOCK;
+ node = avc_lookup(ssid, tsid, tclass, requested);
+ AVC_RDUNLOCK;
- if (!found) {
+ if (!node) {
rc = security_compute_av(ssid,tsid,tclass,requested,&entry.avd);
if (rc)
goto out;
- node = avc_insert(ssid,tsid,tclass,&entry,&hvalue);
+ node = avc_insert(ssid,tsid,tclass,&entry);
}
+ AVC_RDLOCK;
p_ae = node ? &node->ae : &entry;
if (avd)
memcpy(avd, &p_ae->avd, sizeof(*avd));
denied = requested & ~(p_ae->avd.allowed);
- if (found)
- AVC_UNLOCK(hvalue); /* locked by avc_lookup() */
- else if (node)
- AVC_UNLOCK(hvalue); /* locked by avc_insert() */
+ AVC_RDUNLOCK;
if (!requested || denied) {
if (selinux_enforcing)
More information about the trustedbsd-cvs
mailing list