
Linux/drivers/misc/sram.c

/*
 * Generic on-chip SRAM allocation driver
 *
 * Copyright (C) 2012 Philipp Zabel, Pengutronix
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <soc/at91/atmel-secumod.h>

#define SRAM_GRANULARITY        32

struct sram_partition {
        void __iomem *base;

        struct gen_pool *pool;
        struct bin_attribute battr;
        struct mutex lock;
};

struct sram_dev {
        struct device *dev;
        void __iomem *virt_base;

        struct gen_pool *pool;
        struct clk *clk;

        struct sram_partition *partition;
        u32 partitions;
};

struct sram_reserve {
        struct list_head list;
        u32 start;
        u32 size;
        bool export;
        bool pool;
        const char *label;
};

static ssize_t sram_read(struct file *filp, struct kobject *kobj,
                         struct bin_attribute *attr,
                         char *buf, loff_t pos, size_t count)
{
        struct sram_partition *part;

        part = container_of(attr, struct sram_partition, battr);

        mutex_lock(&part->lock);
        memcpy_fromio(buf, part->base + pos, count);
        mutex_unlock(&part->lock);

        return count;
}

static ssize_t sram_write(struct file *filp, struct kobject *kobj,
                          struct bin_attribute *attr,
                          char *buf, loff_t pos, size_t count)
{
        struct sram_partition *part;

        part = container_of(attr, struct sram_partition, battr);

        mutex_lock(&part->lock);
        memcpy_toio(part->base + pos, buf, count);
        mutex_unlock(&part->lock);

        return count;
}

static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
                         phys_addr_t start, struct sram_partition *part)
{
        int ret;

        part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
                                          NUMA_NO_NODE, block->label);
        if (IS_ERR(part->pool))
                return PTR_ERR(part->pool);

        ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
                                block->size, NUMA_NO_NODE);
        if (ret < 0) {
                dev_err(sram->dev, "failed to register subpool: %d\n", ret);
                return ret;
        }

        return 0;
}

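The sub-pools registered here (and the device-wide pool created in sram_probe() below) are consumed through the kernel's genalloc API. The following is a minimal client-side sketch, not part of this driver: the "sram" phandle property name, the example_claim_sram() helper and the 4 KiB size are illustrative assumptions.

/* Illustrative consumer of an SRAM gen_pool (not part of sram.c). */
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/sizes.h>

static int example_claim_sram(struct device_node *np)
{
        struct gen_pool *pool;
        dma_addr_t phys;
        void *vaddr;

        /* "sram" is an assumed phandle property in the client's DT node. */
        pool = of_gen_pool_get(np, "sram", 0);
        if (!pool)
                return -EPROBE_DEFER;

        /* Carve out 4 KiB; the pool allocates in SRAM_GRANULARITY (32-byte) units. */
        vaddr = gen_pool_dma_alloc(pool, SZ_4K, &phys);
        if (!vaddr)
                return -ENOMEM;

        /* ... use vaddr (CPU view) / phys (bus address), then release ... */
        gen_pool_free(pool, (unsigned long)vaddr, SZ_4K);

        return 0;
}
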
static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
                           phys_addr_t start, struct sram_partition *part)
{
        sysfs_bin_attr_init(&part->battr);
        part->battr.attr.name = devm_kasprintf(sram->dev, GFP_KERNEL,
                                               "%llx.sram",
                                               (unsigned long long)start);
        if (!part->battr.attr.name)
                return -ENOMEM;

        part->battr.attr.mode = S_IRUSR | S_IWUSR;
        part->battr.read = sram_read;
        part->battr.write = sram_write;
        part->battr.size = block->size;

        return device_create_bin_file(sram->dev, &part->battr);
}

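The binary attribute created here appears in sysfs under the SRAM platform device, named "<physaddr>.sram", and is served by sram_read()/sram_write() above. A small userspace sketch for dumping such a partition follows; the sysfs path is a made-up example and depends on the actual device address.

/* Userspace sketch: dump an exported SRAM partition (path is illustrative). */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical path; the real file is named "<physaddr>.sram". */
        const char *path = "/sys/devices/platform/200000.sram/200000.sram";
        unsigned char buf[4096];
        ssize_t n;
        int fd;

        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);

        close(fd);
        return 0;
}
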
static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
                              phys_addr_t start)
{
        int ret;
        struct sram_partition *part = &sram->partition[sram->partitions];

        mutex_init(&part->lock);
        part->base = sram->virt_base + block->start;

        if (block->pool) {
                ret = sram_add_pool(sram, block, start, part);
                if (ret)
                        return ret;
        }
        if (block->export) {
                ret = sram_add_export(sram, block, start, part);
                if (ret)
                        return ret;
        }
        sram->partitions++;

        return 0;
}

static void sram_free_partitions(struct sram_dev *sram)
{
        struct sram_partition *part;

        if (!sram->partitions)
                return;

        part = &sram->partition[sram->partitions - 1];
        for (; sram->partitions; sram->partitions--, part--) {
                if (part->battr.size)
                        device_remove_bin_file(sram->dev, &part->battr);

                if (part->pool &&
                    gen_pool_avail(part->pool) < gen_pool_size(part->pool))
                        dev_err(sram->dev, "removed pool while SRAM allocated\n");
        }
}

static int sram_reserve_cmp(void *priv, struct list_head *a,
                                        struct list_head *b)
{
        struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
        struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);

        return ra->start - rb->start;
}

static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
        struct device_node *np = sram->dev->of_node, *child;
        unsigned long size, cur_start, cur_size;
        struct sram_reserve *rblocks, *block;
        struct list_head reserve_list;
        unsigned int nblocks, exports = 0;
        const char *label;
        int ret = 0;

        INIT_LIST_HEAD(&reserve_list);

        size = resource_size(res);

        /*
         * We need an additional block to mark the end of the memory region
         * after the reserved blocks from the dt are processed.
         */
        nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
        rblocks = kzalloc((nblocks) * sizeof(*rblocks), GFP_KERNEL);
        if (!rblocks)
                return -ENOMEM;

        block = &rblocks[0];
        for_each_available_child_of_node(np, child) {
                struct resource child_res;

                ret = of_address_to_resource(child, 0, &child_res);
                if (ret < 0) {
                        dev_err(sram->dev,
                                "could not get address for node %s\n",
                                child->full_name);
                        goto err_chunks;
                }

                if (child_res.start < res->start || child_res.end > res->end) {
                        dev_err(sram->dev,
                                "reserved block %s outside the sram area\n",
                                child->full_name);
                        ret = -EINVAL;
                        goto err_chunks;
                }

                block->start = child_res.start - res->start;
                block->size = resource_size(&child_res);
                list_add_tail(&block->list, &reserve_list);

                if (of_find_property(child, "export", NULL))
                        block->export = true;

                if (of_find_property(child, "pool", NULL))
                        block->pool = true;

                if ((block->export || block->pool) && block->size) {
                        exports++;

                        label = NULL;
                        ret = of_property_read_string(child, "label", &label);
                        if (ret && ret != -EINVAL) {
                                dev_err(sram->dev,
                                        "%s has invalid label name\n",
                                        child->full_name);
                                goto err_chunks;
                        }
                        if (!label)
                                label = child->name;

                        block->label = devm_kstrdup(sram->dev,
                                                    label, GFP_KERNEL);
                        if (!block->label) {
                                ret = -ENOMEM;
                                goto err_chunks;
                        }

                        dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
                                block->export ? "exported " : "", block->label,
                                block->start, block->start + block->size);
                } else {
                        dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
                                block->start, block->start + block->size);
                }

                block++;
        }
        child = NULL;

        /* the last chunk marks the end of the region */
        rblocks[nblocks - 1].start = size;
        rblocks[nblocks - 1].size = 0;
        list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

        list_sort(NULL, &reserve_list, sram_reserve_cmp);

        if (exports) {
                sram->partition = devm_kzalloc(sram->dev,
                                       exports * sizeof(*sram->partition),
                                       GFP_KERNEL);
                if (!sram->partition) {
                        ret = -ENOMEM;
                        goto err_chunks;
                }
        }

        cur_start = 0;
        list_for_each_entry(block, &reserve_list, list) {
                /* can only happen if sections overlap */
                if (block->start < cur_start) {
                        dev_err(sram->dev,
                                "block at 0x%x starts after current offset 0x%lx\n",
                                block->start, cur_start);
                        ret = -EINVAL;
                        sram_free_partitions(sram);
                        goto err_chunks;
                }

                if ((block->export || block->pool) && block->size) {
                        ret = sram_add_partition(sram, block,
                                                 res->start + block->start);
                        if (ret) {
                                sram_free_partitions(sram);
                                goto err_chunks;
                        }
                }

                /* current start is in a reserved block, so continue after it */
                if (block->start == cur_start) {
                        cur_start = block->start + block->size;
                        continue;
                }

                /*
                 * allocate the space between the current starting
                 * address and the following reserved block, or the
                 * end of the region.
                 */
                cur_size = block->start - cur_start;

                dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
                        cur_start, cur_start + cur_size);

                ret = gen_pool_add_virt(sram->pool,
                                (unsigned long)sram->virt_base + cur_start,
                                res->start + cur_start, cur_size, -1);
                if (ret < 0) {
                        sram_free_partitions(sram);
                        goto err_chunks;
                }

                /* next allocation after this reserved block */
                cur_start = block->start + block->size;
        }

 err_chunks:
        if (child)
                of_node_put(child);

        kfree(rblocks);

        return ret;
}

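The child nodes walked above follow the mmio-sram devicetree binding (Documentation/devicetree/bindings/sram/sram.txt): each reserved region carries a reg offset and size relative to the parent, plus optional pool, export and label properties. An illustrative fragment, with made-up node names and addresses, is sketched below as a comment.

/*
 * Illustrative device-tree fragment (addresses and names are examples only):
 *
 *      sram: sram@20000000 {
 *              compatible = "mmio-sram";
 *              reg = <0x20000000 0x10000>;
 *              #address-cells = <1>;
 *              #size-cells = <1>;
 *              ranges = <0 0x20000000 0x10000>;
 *
 *              reserved@0 {                    // kept out of any pool
 *                      reg = <0x0 0x1000>;
 *              };
 *
 *              pool@1000 {                     // becomes a labelled sub-pool
 *                      reg = <0x1000 0x2000>;
 *                      pool;
 *                      label = "scratch";
 *              };
 *
 *              exported@3000 {                 // exposed via sysfs
 *                      reg = <0x3000 0x1000>;
 *                      export;
 *              };
 *      };
 */
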
static int atmel_securam_wait(void)
{
        struct regmap *regmap;
        u32 val;

        regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
        if (IS_ERR(regmap))
                return -ENODEV;

        return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
                                        val & AT91_SECUMOD_RAMRDY_READY,
                                        10000, 500000);
}

static const struct of_device_id sram_dt_ids[] = {
        { .compatible = "mmio-sram" },
        { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
        {}
};

static int sram_probe(struct platform_device *pdev)
{
        struct sram_dev *sram;
        struct resource *res;
        size_t size;
        int ret;
        int (*init_func)(void);

        sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
        if (!sram)
                return -ENOMEM;

        sram->dev = &pdev->dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(sram->dev, "found no memory resource\n");
                return -EINVAL;
        }

        size = resource_size(res);

        if (!devm_request_mem_region(sram->dev, res->start, size, pdev->name)) {
                dev_err(sram->dev, "could not request region for resource\n");
                return -EBUSY;
        }

        if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
                sram->virt_base = devm_ioremap(sram->dev, res->start, size);
        else
                sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
        if (!sram->virt_base)
                return -ENOMEM;

        sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
                                          NUMA_NO_NODE, NULL);
        if (IS_ERR(sram->pool))
                return PTR_ERR(sram->pool);

        ret = sram_reserve_regions(sram, res);
        if (ret)
                return ret;

        sram->clk = devm_clk_get(sram->dev, NULL);
        if (IS_ERR(sram->clk))
                sram->clk = NULL;
        else
                clk_prepare_enable(sram->clk);

        platform_set_drvdata(pdev, sram);

        init_func = of_device_get_match_data(&pdev->dev);
        if (init_func) {
                ret = init_func();
                if (ret)
                        goto err_disable_clk;
        }

        dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
                gen_pool_size(sram->pool) / 1024, sram->virt_base);

        return 0;

err_disable_clk:
        if (sram->clk)
                clk_disable_unprepare(sram->clk);
        sram_free_partitions(sram);

        return ret;
}

static int sram_remove(struct platform_device *pdev)
{
        struct sram_dev *sram = platform_get_drvdata(pdev);

        sram_free_partitions(sram);

        if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
                dev_err(sram->dev, "removed while SRAM allocated\n");

        if (sram->clk)
                clk_disable_unprepare(sram->clk);

        return 0;
}

static struct platform_driver sram_driver = {
        .driver = {
                .name = "sram",
                .of_match_table = sram_dt_ids,
        },
        .probe = sram_probe,
        .remove = sram_remove,
};

static int __init sram_init(void)
{
        return platform_driver_register(&sram_driver);
}

postcore_initcall(sram_init);

