[patch] Compilation problems in sys/arm/arm/pmap.c when PMAP_DEBUG is defined.
Stanislav Sedov
stas at FreeBSD.org
Fri Oct 2 11:56:08 UTC 2009
On Fri, 02 Oct 2009 06:13:16 +0000
Tom Judge <tom at tomjudge.com> mentioned:
> Hi,
>
> I ran into some issues this evening while I was building some kernels
> with PMAP_DEBUG defined.
>
> I have attached a patch that addresses the problems with the PDEBUG
> sections. (The first two hunks should probably be ignored.)
>
> However, there are two warnings about unused functions when PMAP_INLINE
> is defined as "". I did not know the correct fix for this, so I defined
> PMAP_INLINE to __inline even when PMAP_DEBUG was set, which seemed to
> hide the problem again.
>
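For reference, the relevant definition at the top of pmap.c looks roughly
like this (a sketch from memory; check your tree for the exact guard):

#ifdef PMAP_DEBUG
#define PMAP_INLINE		/* empty: helpers become plain static functions */
#else
#define PMAP_INLINE __inline	/* normal builds inline them, so gcc stays quiet */
#endif
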
Actually, these two PMAP_INLINE functions are unused: pmap_use_l1() has no
callers at all, and the only caller of pmap_dcache_wbinv_all() is
pmap_clean_page(), which is already compiled out under #if 0. I believe we
can drop them.
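To see why the warnings only show up with PMAP_DEBUG, consider this
contrived example (dead_helper is a made-up name, not from pmap.c):

/* Built with PMAP_INLINE empty, as in a PMAP_DEBUG kernel. */
#define PMAP_INLINE

static PMAP_INLINE void
dead_helper(void)		/* hypothetical; nothing ever calls it */
{
}

/* gcc: warning: 'dead_helper' defined but not used
 * -Wunused-function does not fire for an uncalled static __inline
 * function, which is why defining PMAP_INLINE to __inline hides it. */

Rather than hide the warnings, the patch below removes the dead code.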
Olivier, what do you think about the following patch?
Index: sys/arm/arm/pmap.c
===================================================================
--- sys/arm/arm/pmap.c	(revision 197707)
+++ sys/arm/arm/pmap.c	(working copy)
@@ -203,7 +203,6 @@
 static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
 static void pmap_alloc_l1(pmap_t);
 static void pmap_free_l1(pmap_t);
-static void pmap_use_l1(pmap_t);
 
 static int pmap_clearbit(struct vm_page *, u_int);
@@ -829,47 +828,6 @@
 	mtx_unlock(&l1_lru_lock);
 }
 
-static PMAP_INLINE void
-pmap_use_l1(pmap_t pm)
-{
-	struct l1_ttable *l1;
-
-	/*
-	 * Do nothing if we're in interrupt context.
-	 * Access to an L1 by the kernel pmap must not affect
-	 * the LRU list.
-	 */
-	if (pm == pmap_kernel())
-		return;
-
-	l1 = pm->pm_l1;
-
-	/*
-	 * If the L1 is not currently on the LRU list, just return
-	 */
-	if (l1->l1_domain_use_count == PMAP_DOMAINS)
-		return;
-
-	mtx_lock(&l1_lru_lock);
-
-	/*
-	 * Check the use count again, now that we've acquired the lock
-	 */
-	if (l1->l1_domain_use_count == PMAP_DOMAINS) {
-		mtx_unlock(&l1_lru_lock);
-		return;
-	}
-
-	/*
-	 * Move the L1 to the back of the LRU list
-	 */
-	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
-	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
-
-	mtx_unlock(&l1_lru_lock);
-}
-
-
 /*
  * Returns a pointer to the L2 bucket associated with the specified pmap
  * and VA, or NULL if no L2 bucket exists for the address.
@@ -1311,16 +1269,6 @@
 	}
 }
 
-static PMAP_INLINE void
-pmap_dcache_wbinv_all(pmap_t pm)
-{
-
-	if (pmap_is_current(pm)) {
-		cpu_dcache_wbinv_all();
-		cpu_l2cache_wbinv_all();
-	}
-}
-
 /*
  * PTE_SYNC_CURRENT:
  *
@@ -1914,7 +1862,7 @@
 {
 	int shpgperproc = PMAP_SHPGPERPROC;
 
-	PDEBUG(1, printf("pmap_init: phys_start = %08x\n"));
+	PDEBUG(1, printf("pmap_init: phys_start = %08x\n", PHYSADDR));
 
 	/*
 	 * init the pv free list
@@ -2373,8 +2321,8 @@
 	vm_size_t size;
 	int l1idx, l2idx, l2next = 0;
 
-	PDEBUG(1, printf("firstaddr = %08x, loadaddr = %08x\n",
-	    firstaddr, loadaddr));
+	PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
+	    firstaddr, lastaddr));
 
 	virtual_avail = firstaddr;
 	kernel_pmap->pm_l1 = l1;
@@ -4246,96 +4194,7 @@
 	pmap_zero_page(m);
 }
 
-#if 0
 /*
- * pmap_clean_page()
- *
- * This is a local function used to work out the best strategy to clean
- * a single page referenced by its entry in the PV table. It's used by
- * pmap_copy_page, pmap_zero page and maybe some others later on.
- *
- * Its policy is effectively:
- *  o If there are no mappings, we don't bother doing anything with the cache.
- *  o If there is one mapping, we clean just that page.
- *  o If there are multiple mappings, we clean the entire cache.
- *
- * So that some functions can be further optimised, it returns 0 if it didn't
- * clean the entire cache, or 1 if it did.
- *
- * XXX One bug in this routine is that if the pv_entry has a single page
- * mapped at 0x00000000 a whole cache clean will be performed rather than
- * just the 1 page. Since this should not occur in everyday use and if it does
- * it will just result in not the most efficient clean for the page.
- */
-static int
-pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
-{
-	pmap_t pm, pm_to_clean = NULL;
-	struct pv_entry *npv;
-	u_int cache_needs_cleaning = 0;
-	u_int flags = 0;
-	vm_offset_t page_to_clean = 0;
-
-	if (pv == NULL) {
-		/* nothing mapped in so nothing to flush */
-		return (0);
-	}
-
-	/*
-	 * Since we flush the cache each time we change to a different
-	 * user vmspace, we only need to flush the page if it is in the
-	 * current pmap.
-	 */
-	if (curthread)
-		pm = vmspace_pmap(curproc->p_vmspace);
-	else
-		pm = pmap_kernel();
-
-	for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
-		if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
-			flags |= npv->pv_flags;
-			/*
-			 * The page is mapped non-cacheable in
-			 * this map. No need to flush the cache.
-			 */
-			if (npv->pv_flags & PVF_NC) {
-#ifdef DIAGNOSTIC
-				if (cache_needs_cleaning)
-					panic("pmap_clean_page: "
-					    "cache inconsistency");
-#endif
-				break;
-			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
-				continue;
-			if (cache_needs_cleaning) {
-				page_to_clean = 0;
-				break;
-			} else {
-				page_to_clean = npv->pv_va;
-				pm_to_clean = npv->pv_pmap;
-			}
-			cache_needs_cleaning = 1;
-		}
-	}
-	if (page_to_clean) {
-		if (PV_BEEN_EXECD(flags))
-			pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
-			    PAGE_SIZE);
-		else
-			pmap_dcache_wb_range(pm_to_clean, page_to_clean,
-			    PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
-	} else if (cache_needs_cleaning) {
-		if (PV_BEEN_EXECD(flags))
-			pmap_idcache_wbinv_all(pm);
-		else
-			pmap_dcache_wbinv_all(pm);
-		return (1);
-	}
-	return (0);
-}
-#endif
-
-/*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
  * bcopy to copy the page, one machine dependent page at a
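As a side note on why the two broken printf() calls above ever compiled:
PDEBUG only emits its statement when PMAP_DEBUG is defined, roughly like
this (a sketch; the real definition is near the top of pmap.c):

#ifdef PMAP_DEBUG
int pmap_debug_level = 0;
#define PDEBUG(_lev_, _stat_)			\
	if (pmap_debug_level >= (_lev_))	\
		((_stat_))
#else
#define PDEBUG(_lev_, _stat_)	/* nothing */
#endif

In a normal build the printf() disappears at preprocessing time, so not
even -Wformat gets a chance to flag the missing arguments.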
--
Stanislav Sedov
ST4096-RIPE