
Linux/kernel/memremap.c

/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
        return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
        return (__force void *)ioremap_cache(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
        unsigned long pfn = PHYS_PFN(offset);

        /* In the simple case just return the existing linear address */
        if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
                return __va(offset);
        return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but the
 * mapping is otherwise uncached. Attempts to map System RAM with this
 * mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
        int is_ram = region_intersects(offset, size,
                                       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
        void *addr = NULL;

        if (!flags)
                return NULL;

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        /* Try all mapping types requested until one returns non-NULL */
        if (flags & MEMREMAP_WB) {
                /*
                 * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map.  Some archs depend on the
                 * capability of memremap() to autodetect cases where
                 * the requested range is potentially in System RAM.
                 */
                if (is_ram == REGION_INTERSECTS)
                        addr = try_ram_remap(offset, size);
                if (!addr)
                        addr = arch_memremap_wb(offset, size);
        }

        /*
         * If we don't have a mapping yet and other request flags are
         * present then we will be attempting to establish a new virtual
         * address mapping.  Enforce that this mapping is not aliasing
         * System RAM.
         */
        if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                                &offset, (unsigned long) size);
                return NULL;
        }

        if (!addr && (flags & MEMREMAP_WT))
                addr = ioremap_wt(offset, size);

        if (!addr && (flags & MEMREMAP_WC))
                addr = ioremap_wc(offset, size);

        return addr;
}
EXPORT_SYMBOL(memremap);

void memunmap(void *addr)
{
        /* direct-map pointers were never remapped and need no teardown */
        if (is_vmalloc_addr(addr))
                iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

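/*
 * Example usage (an illustrative sketch, not part of the original file;
 * 'res' is a hypothetical struct resource): request a write-back mapping
 * and fall back to write-combine if the architecture cannot provide WB:
 *
 *      void *addr = memremap(res->start, resource_size(res),
 *                      MEMREMAP_WB | MEMREMAP_WC);
 *      if (!addr)
 *              return -ENXIO;
 *      ...
 *      memunmap(addr);
 */
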
/* devres teardown callback: undo the mapping installed by devm_memremap() */
static void devm_memremap_release(struct device *dev, void *res)
{
        memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
        return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        void **ptr, *addr;

        ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
                        dev_to_node(dev));
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        addr = memremap(offset, size, flags);
        if (addr) {
                *ptr = addr;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
                return ERR_PTR(-ENXIO);
        }

        return addr;
}
EXPORT_SYMBOL(devm_memremap);

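/*
 * Example usage (an illustrative sketch; 'dev' and 'res' are hypothetical):
 * unlike memremap(), the devres-managed variant reports failure with an
 * ERR_PTR() code rather than NULL, and teardown happens automatically on
 * driver detach:
 *
 *      void *addr = devm_memremap(dev, res->start, resource_size(res),
 *                      MEMREMAP_WB);
 *      if (IS_ERR(addr))
 *              return PTR_ERR(addr);
 */
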
void devm_memunmap(struct device *dev, void *addr)
{
        WARN_ON(devres_release(dev, devm_memremap_release,
                                devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

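/*
 * Worked example (illustrative, assuming PA_SECTION_SHIFT == 27 as on
 * x86_64): SECTION_SIZE is then 128MiB and SECTION_MASK clears the low
 * 27 address bits, so a range starting at 0x148000000 falls in memory
 * section 0x148000000 >> 27 == 0x29, which is the radix-tree key used
 * below.
 */
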
struct page_map {
        struct resource res;
        struct percpu_ref *ref;
        struct dev_pagemap pgmap;
        struct vmem_altmap altmap;
};

void get_zone_device_page(struct page *page)
{
        percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
        put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);

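/*
 * Illustrative call path (an assumption based on how get_page()/put_page()
 * handle ZONE_DEVICE pages in kernels of this era, not code from this file):
 *
 *      get_page(page)
 *              -> if (is_zone_device_page(page))
 *                      get_zone_device_page(page);
 *      put_page(page)
 *              -> if (is_zone_device_page(page))
 *                      put_zone_device_page(page);
 *
 * i.e. any elevated page reference also pins the device mapping via the
 * percpu_ref registered in devm_memremap_pages().
 */
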
static void pgmap_radix_release(struct resource *res)
{
        resource_size_t key, align_start, align_size, align_end;

        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
        align_end = align_start + align_size - 1;

        /*
         * Walk the section-aligned range so the deletions mirror the
         * insertions done in devm_memremap_pages(); iterating the raw
         * res->start..res->end with an unaligned start could step past
         * the final section and leak its radix-tree entry.
         */
        mutex_lock(&pgmap_lock);
        for (key = align_start; key <= align_end; key += SECTION_SIZE)
                radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
        mutex_unlock(&pgmap_lock);
}

static unsigned long pfn_first(struct page_map *page_map)
{
        struct dev_pagemap *pgmap = &page_map->pgmap;
        const struct resource *res = &page_map->res;
        struct vmem_altmap *altmap = pgmap->altmap;
        unsigned long pfn;

        pfn = res->start >> PAGE_SHIFT;
        if (altmap)
                pfn += vmem_altmap_offset(altmap);
        return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
        const struct resource *res = &page_map->res;

        return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
        for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
        struct page_map *page_map = data;
        struct resource *res = &page_map->res;
        resource_size_t align_start, align_size;
        struct dev_pagemap *pgmap = &page_map->pgmap;

        if (percpu_ref_tryget_live(pgmap->ref)) {
                dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
                percpu_ref_put(pgmap->ref);
        }

        /*
         * Pages are dead and unused, undo the arch mapping.  Recompute
         * the alignment exactly as devm_memremap_pages() did so the same
         * range that was hotplugged is removed.
         */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
        mem_hotplug_begin();
        arch_remove_memory(align_start, align_size);
        mem_hotplug_done();
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
        struct page_map *page_map;

        WARN_ON_ONCE(!rcu_read_lock_held());

        page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
        return page_map ? &page_map->pgmap : NULL;
}

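/*
 * Illustrative caller pattern (it mirrors the lookup done in
 * devm_memremap_pages() below); the returned pgmap is only stable
 * within the RCU read-side critical section:
 *
 *      rcu_read_lock();
 *      pgmap = find_dev_pagemap(phys);
 *      if (pgmap)
 *              ...
 *      rcu_read_unlock();
 */
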
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
{
        resource_size_t key, align_start, align_size, align_end;
        pgprot_t pgprot = PAGE_KERNEL;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        int error, nid, is_ram;
        unsigned long pfn;

        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
        is_ram = region_intersects(align_start, align_size,
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

        if (is_ram == REGION_MIXED) {
                WARN_ONCE(1, "%s attempted on mixed region %pr\n",
                                __func__, res);
                return ERR_PTR(-ENXIO);
        }

        if (is_ram == REGION_INTERSECTS)
                return __va(res->start);

        if (!ref)
                return ERR_PTR(-EINVAL);

        page_map = devres_alloc_node(devm_memremap_pages_release,
                        sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
        if (!page_map)
                return ERR_PTR(-ENOMEM);
        pgmap = &page_map->pgmap;

        memcpy(&page_map->res, res, sizeof(*res));

        pgmap->dev = dev;
        if (altmap) {
                memcpy(&page_map->altmap, altmap, sizeof(*altmap));
                pgmap->altmap = &page_map->altmap;
        }
        pgmap->ref = ref;
        pgmap->res = &page_map->res;

        mutex_lock(&pgmap_lock);
        error = 0;
        align_end = align_start + align_size - 1;
        for (key = align_start; key <= align_end; key += SECTION_SIZE) {
                struct dev_pagemap *dup;

                rcu_read_lock();
                dup = find_dev_pagemap(key);
                rcu_read_unlock();
                if (dup) {
                        dev_err(dev, "%s: %pr collides with mapping for %s\n",
                                        __func__, res, dev_name(dup->dev));
                        error = -EBUSY;
                        break;
                }
                error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
                                page_map);
                if (error) {
                        dev_err(dev, "%s: failed: %d\n", __func__, error);
                        break;
                }
        }
        mutex_unlock(&pgmap_lock);
        if (error)
                goto err_radix;

        nid = dev_to_node(dev);
        if (nid < 0)
                nid = numa_mem_id();

        error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
                        align_size);
        if (error)
                goto err_pfn_remap;

        mem_hotplug_begin();
        error = arch_add_memory(nid, align_start, align_size, true);
        mem_hotplug_done();
        if (error)
                goto err_add_memory;

        for_each_device_pfn(pfn, page_map) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * ZONE_DEVICE pages union ->lru with a ->pgmap back
                 * pointer.  It is a bug if a ZONE_DEVICE page is ever
                 * freed or placed on a driver-private list.  Seed the
                 * storage with LIST_POISON* values.
                 */
                list_del(&page->lru);
                page->pgmap = pgmap;
        }
        devres_add(dev, page_map);
        return __va(res->start);

 err_add_memory:
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
        pgmap_radix_release(res);
        devres_free(page_map);
        return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);

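/*
 * Example usage (an illustrative sketch; 'ref' and 'release_cb' are
 * hypothetical names, and the kill/release sequencing of a real
 * pmem-style driver is elided):
 *
 *      error = percpu_ref_init(&ref, release_cb, 0, GFP_KERNEL);
 *      if (error)
 *              return error;
 *      addr = devm_memremap_pages(dev, res, &ref, NULL);
 *      if (IS_ERR(addr))
 *              return PTR_ERR(addr);
 *
 * On success, addr behaves like a linear mapping of res and
 * pfn_to_page() is valid for every pfn in the range.
 */
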
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
        /* number of pfns from base where pfn_to_page() is valid */
        return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
        /* hand @nr_pfns back to the altmap's allocation pool */
        altmap->alloc -= nr_pfns;
}

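/*
 * Worked example (illustrative): an altmap with reserve == 2 and
 * free == 1024 gives vmem_altmap_offset() == 1026; the first 1026 pfns
 * of the range hold the reservation plus the memmap storage pool, so
 * pfn_first() above starts handing out device pfns at base + 1026.
 */
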
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
        /*
         * 'memmap_start' is the virtual address for the first "struct
         * page" in this range of the vmemmap array.  In the case of
         * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
         * pointer arithmetic, so we can perform this to_vmem_altmap()
         * conversion without concern for the initialization state of
         * the struct page fields.
         */
        struct page *page = (struct page *) memmap_start;
        struct dev_pagemap *pgmap;

        /*
         * Unconditionally retrieve a dev_pagemap associated with the
         * given physical address, this is only for use in the
         * arch_{add|remove}_memory() for setting up and tearing down
         * the memmap.
         */
        rcu_read_lock();
        pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
        rcu_read_unlock();

        return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */
