Version:  2.0.40 2.2.26 2.4.37 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6 4.7 4.8

Linux/drivers/iommu/arm-smmu.c

  1 /*
  2  * IOMMU API for ARM architected SMMU implementations.
  3  *
  4  * This program is free software; you can redistribute it and/or modify
  5  * it under the terms of the GNU General Public License version 2 as
  6  * published by the Free Software Foundation.
  7  *
  8  * This program is distributed in the hope that it will be useful,
  9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 11  * GNU General Public License for more details.
 12  *
 13  * You should have received a copy of the GNU General Public License
 14  * along with this program; if not, write to the Free Software
 15  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 16  *
 17  * Copyright (C) 2013 ARM Limited
 18  *
 19  * Author: Will Deacon <will.deacon@arm.com>
 20  *
 21  * This driver currently supports:
 22  *      - SMMUv1 and v2 implementations
 23  *      - Stream-matching and stream-indexing
 24  *      - v7/v8 long-descriptor format
 25  *      - Non-secure access to the SMMU
 26  *      - Context fault reporting
 27  */
 28 
 29 #define pr_fmt(fmt) "arm-smmu: " fmt
 30 
 31 #include <linux/delay.h>
 32 #include <linux/dma-iommu.h>
 33 #include <linux/dma-mapping.h>
 34 #include <linux/err.h>
 35 #include <linux/interrupt.h>
 36 #include <linux/io.h>
 37 #include <linux/io-64-nonatomic-hi-lo.h>
 38 #include <linux/iommu.h>
 39 #include <linux/iopoll.h>
 40 #include <linux/module.h>
 41 #include <linux/of.h>
 42 #include <linux/of_address.h>
 43 #include <linux/pci.h>
 44 #include <linux/platform_device.h>
 45 #include <linux/slab.h>
 46 #include <linux/spinlock.h>
 47 
 48 #include <linux/amba/bus.h>
 49 
 50 #include "io-pgtable.h"
 51 
 52 /* Maximum number of stream IDs assigned to a single device */
 53 #define MAX_MASTER_STREAMIDS            128
 54 
 55 /* Maximum number of context banks per SMMU */
 56 #define ARM_SMMU_MAX_CBS                128
 57 
 58 /* Maximum number of mapping groups per SMMU */
 59 #define ARM_SMMU_MAX_SMRS               128
 60 
 61 /* SMMU global address space */
 62 #define ARM_SMMU_GR0(smmu)              ((smmu)->base)
 63 #define ARM_SMMU_GR1(smmu)              ((smmu)->base + (1 << (smmu)->pgshift))
 64 
 65 /*
 66  * SMMU global address space with conditional offset to access secure
 67  * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 68  * nsGFSYNR0: 0x450)
 69  */
 70 #define ARM_SMMU_GR0_NS(smmu)                                           \
 71         ((smmu)->base +                                                 \
 72                 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)       \
 73                         ? 0x400 : 0))
 74 
 75 /*
 76  * Some 64-bit registers only make sense to write atomically, but in such
 77  * cases all the data relevant to AArch32 formats lies within the lower word,
 78  * therefore this actually makes more sense than it might first appear.
 79  */
 80 #ifdef CONFIG_64BIT
 81 #define smmu_write_atomic_lq            writeq_relaxed
 82 #else
 83 #define smmu_write_atomic_lq            writel_relaxed
 84 #endif
 85 
 86 /* Configuration registers */
 87 #define ARM_SMMU_GR0_sCR0               0x0
 88 #define sCR0_CLIENTPD                   (1 << 0)
 89 #define sCR0_GFRE                       (1 << 1)
 90 #define sCR0_GFIE                       (1 << 2)
 91 #define sCR0_GCFGFRE                    (1 << 4)
 92 #define sCR0_GCFGFIE                    (1 << 5)
 93 #define sCR0_USFCFG                     (1 << 10)
 94 #define sCR0_VMIDPNE                    (1 << 11)
 95 #define sCR0_PTM                        (1 << 12)
 96 #define sCR0_FB                         (1 << 13)
 97 #define sCR0_VMID16EN                   (1 << 31)
 98 #define sCR0_BSU_SHIFT                  14
 99 #define sCR0_BSU_MASK                   0x3
100 
101 /* Auxiliary Configuration register */
102 #define ARM_SMMU_GR0_sACR               0x10
103 
104 /* Identification registers */
105 #define ARM_SMMU_GR0_ID0                0x20
106 #define ARM_SMMU_GR0_ID1                0x24
107 #define ARM_SMMU_GR0_ID2                0x28
108 #define ARM_SMMU_GR0_ID3                0x2c
109 #define ARM_SMMU_GR0_ID4                0x30
110 #define ARM_SMMU_GR0_ID5                0x34
111 #define ARM_SMMU_GR0_ID6                0x38
112 #define ARM_SMMU_GR0_ID7                0x3c
113 #define ARM_SMMU_GR0_sGFSR              0x48
114 #define ARM_SMMU_GR0_sGFSYNR0           0x50
115 #define ARM_SMMU_GR0_sGFSYNR1           0x54
116 #define ARM_SMMU_GR0_sGFSYNR2           0x58
117 
118 #define ID0_S1TS                        (1 << 30)
119 #define ID0_S2TS                        (1 << 29)
120 #define ID0_NTS                         (1 << 28)
121 #define ID0_SMS                         (1 << 27)
122 #define ID0_ATOSNS                      (1 << 26)
123 #define ID0_PTFS_NO_AARCH32             (1 << 25)
124 #define ID0_PTFS_NO_AARCH32S            (1 << 24)
125 #define ID0_CTTW                        (1 << 14)
126 #define ID0_NUMIRPT_SHIFT               16
127 #define ID0_NUMIRPT_MASK                0xff
128 #define ID0_NUMSIDB_SHIFT               9
129 #define ID0_NUMSIDB_MASK                0xf
130 #define ID0_NUMSMRG_SHIFT               0
131 #define ID0_NUMSMRG_MASK                0xff
132 
133 #define ID1_PAGESIZE                    (1 << 31)
134 #define ID1_NUMPAGENDXB_SHIFT           28
135 #define ID1_NUMPAGENDXB_MASK            7
136 #define ID1_NUMS2CB_SHIFT               16
137 #define ID1_NUMS2CB_MASK                0xff
138 #define ID1_NUMCB_SHIFT                 0
139 #define ID1_NUMCB_MASK                  0xff
140 
141 #define ID2_OAS_SHIFT                   4
142 #define ID2_OAS_MASK                    0xf
143 #define ID2_IAS_SHIFT                   0
144 #define ID2_IAS_MASK                    0xf
145 #define ID2_UBS_SHIFT                   8
146 #define ID2_UBS_MASK                    0xf
147 #define ID2_PTFS_4K                     (1 << 12)
148 #define ID2_PTFS_16K                    (1 << 13)
149 #define ID2_PTFS_64K                    (1 << 14)
150 #define ID2_VMID16                      (1 << 15)
151 
152 #define ID7_MAJOR_SHIFT                 4
153 #define ID7_MAJOR_MASK                  0xf
154 
155 /* Global TLB invalidation */
156 #define ARM_SMMU_GR0_TLBIVMID           0x64
157 #define ARM_SMMU_GR0_TLBIALLNSNH        0x68
158 #define ARM_SMMU_GR0_TLBIALLH           0x6c
159 #define ARM_SMMU_GR0_sTLBGSYNC          0x70
160 #define ARM_SMMU_GR0_sTLBGSTATUS        0x74
161 #define sTLBGSTATUS_GSACTIVE            (1 << 0)
162 #define TLB_LOOP_TIMEOUT                1000000 /* 1s! */
163 
164 /* Stream mapping registers */
165 #define ARM_SMMU_GR0_SMR(n)             (0x800 + ((n) << 2))
166 #define SMR_VALID                       (1 << 31)
167 #define SMR_MASK_SHIFT                  16
168 #define SMR_MASK_MASK                   0x7fff
169 #define SMR_ID_SHIFT                    0
170 #define SMR_ID_MASK                     0x7fff
171 
172 #define ARM_SMMU_GR0_S2CR(n)            (0xc00 + ((n) << 2))
173 #define S2CR_CBNDX_SHIFT                0
174 #define S2CR_CBNDX_MASK                 0xff
175 #define S2CR_TYPE_SHIFT                 16
176 #define S2CR_TYPE_MASK                  0x3
177 #define S2CR_TYPE_TRANS                 (0 << S2CR_TYPE_SHIFT)
178 #define S2CR_TYPE_BYPASS                (1 << S2CR_TYPE_SHIFT)
179 #define S2CR_TYPE_FAULT                 (2 << S2CR_TYPE_SHIFT)
180 
181 #define S2CR_PRIVCFG_SHIFT              24
182 #define S2CR_PRIVCFG_UNPRIV             (2 << S2CR_PRIVCFG_SHIFT)
183 
184 /* Context bank attribute registers */
185 #define ARM_SMMU_GR1_CBAR(n)            (0x0 + ((n) << 2))
186 #define CBAR_VMID_SHIFT                 0
187 #define CBAR_VMID_MASK                  0xff
188 #define CBAR_S1_BPSHCFG_SHIFT           8
189 #define CBAR_S1_BPSHCFG_MASK            3
190 #define CBAR_S1_BPSHCFG_NSH             3
191 #define CBAR_S1_MEMATTR_SHIFT           12
192 #define CBAR_S1_MEMATTR_MASK            0xf
193 #define CBAR_S1_MEMATTR_WB              0xf
194 #define CBAR_TYPE_SHIFT                 16
195 #define CBAR_TYPE_MASK                  0x3
196 #define CBAR_TYPE_S2_TRANS              (0 << CBAR_TYPE_SHIFT)
197 #define CBAR_TYPE_S1_TRANS_S2_BYPASS    (1 << CBAR_TYPE_SHIFT)
198 #define CBAR_TYPE_S1_TRANS_S2_FAULT     (2 << CBAR_TYPE_SHIFT)
199 #define CBAR_TYPE_S1_TRANS_S2_TRANS     (3 << CBAR_TYPE_SHIFT)
200 #define CBAR_IRPTNDX_SHIFT              24
201 #define CBAR_IRPTNDX_MASK               0xff
202 
203 #define ARM_SMMU_GR1_CBA2R(n)           (0x800 + ((n) << 2))
204 #define CBA2R_RW64_32BIT                (0 << 0)
205 #define CBA2R_RW64_64BIT                (1 << 0)
206 #define CBA2R_VMID_SHIFT                16
207 #define CBA2R_VMID_MASK                 0xffff
208 
209 /* Translation context bank */
210 #define ARM_SMMU_CB_BASE(smmu)          ((smmu)->base + ((smmu)->size >> 1))
211 #define ARM_SMMU_CB(smmu, n)            ((n) * (1 << (smmu)->pgshift))
212 
213 #define ARM_SMMU_CB_SCTLR               0x0
214 #define ARM_SMMU_CB_ACTLR               0x4
215 #define ARM_SMMU_CB_RESUME              0x8
216 #define ARM_SMMU_CB_TTBCR2              0x10
217 #define ARM_SMMU_CB_TTBR0               0x20
218 #define ARM_SMMU_CB_TTBR1               0x28
219 #define ARM_SMMU_CB_TTBCR               0x30
220 #define ARM_SMMU_CB_S1_MAIR0            0x38
221 #define ARM_SMMU_CB_S1_MAIR1            0x3c
222 #define ARM_SMMU_CB_PAR                 0x50
223 #define ARM_SMMU_CB_FSR                 0x58
224 #define ARM_SMMU_CB_FAR                 0x60
225 #define ARM_SMMU_CB_FSYNR0              0x68
226 #define ARM_SMMU_CB_S1_TLBIVA           0x600
227 #define ARM_SMMU_CB_S1_TLBIASID         0x610
228 #define ARM_SMMU_CB_S1_TLBIVAL          0x620
229 #define ARM_SMMU_CB_S2_TLBIIPAS2        0x630
230 #define ARM_SMMU_CB_S2_TLBIIPAS2L       0x638
231 #define ARM_SMMU_CB_ATS1PR              0x800
232 #define ARM_SMMU_CB_ATSR                0x8f0
233 
234 #define SCTLR_S1_ASIDPNE                (1 << 12)
235 #define SCTLR_CFCFG                     (1 << 7)
236 #define SCTLR_CFIE                      (1 << 6)
237 #define SCTLR_CFRE                      (1 << 5)
238 #define SCTLR_E                         (1 << 4)
239 #define SCTLR_AFE                       (1 << 2)
240 #define SCTLR_TRE                       (1 << 1)
241 #define SCTLR_M                         (1 << 0)
242 #define SCTLR_EAE_SBOP                  (SCTLR_AFE | SCTLR_TRE)
243 
244 #define ARM_MMU500_ACTLR_CPRE           (1 << 1)
245 
246 #define ARM_MMU500_ACR_CACHE_LOCK       (1 << 26)
247 
248 #define CB_PAR_F                        (1 << 0)
249 
250 #define ATSR_ACTIVE                     (1 << 0)
251 
252 #define RESUME_RETRY                    (0 << 0)
253 #define RESUME_TERMINATE                (1 << 0)
254 
255 #define TTBCR2_SEP_SHIFT                15
256 #define TTBCR2_SEP_UPSTREAM             (0x7 << TTBCR2_SEP_SHIFT)
257 
258 #define TTBRn_ASID_SHIFT                48
259 
260 #define FSR_MULTI                       (1 << 31)
261 #define FSR_SS                          (1 << 30)
262 #define FSR_UUT                         (1 << 8)
263 #define FSR_ASF                         (1 << 7)
264 #define FSR_TLBLKF                      (1 << 6)
265 #define FSR_TLBMCF                      (1 << 5)
266 #define FSR_EF                          (1 << 4)
267 #define FSR_PF                          (1 << 3)
268 #define FSR_AFF                         (1 << 2)
269 #define FSR_TF                          (1 << 1)
270 
271 #define FSR_IGN                         (FSR_AFF | FSR_ASF | \
272                                          FSR_TLBMCF | FSR_TLBLKF)
273 #define FSR_FAULT                       (FSR_MULTI | FSR_SS | FSR_UUT | \
274                                          FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
275 
276 #define FSYNR0_WNR                      (1 << 4)
277 
278 static int force_stage;
279 module_param(force_stage, int, S_IRUGO);
280 MODULE_PARM_DESC(force_stage,
281         "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
282 static bool disable_bypass;
283 module_param(disable_bypass, bool, S_IRUGO);
284 MODULE_PARM_DESC(disable_bypass,
285         "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
286 
/* Architecture version implemented by the hardware */
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

/* Known implementations, used to key model-specific quirks */
enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};
298 
/* One Stream Match Register allocation (stream-matching SMMUs only) */
struct arm_smmu_smr {
	u8				idx;	/* SMR index within the SMMU */
	u16				mask;
	u16				id;
};

/* Per-master stream-ID configuration shared via the iommu_group */
struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

/* A DT-described master device, kept in the per-SMMU rb-tree of masters */
struct arm_smmu_master {
	struct device_node		*of_node;	/* rb-tree key (by pointer value) */
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};
316 
/* Per-instance SMMU state, linked into the global arm_smmu_devices list */
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;		/* mapped register space */
	unsigned long			size;		/* size of register space */
	unsigned long			pgshift;	/* log2 of SMMU page size */

/* Hardware features, probed from the ID registers */
#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

/* Board/firmware options, parsed from the DT (see arm_smmu_options[]) */
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);	/* allocated context banks */
	atomic_t			irptndx;	/* round-robin context IRQ allocator */

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);	/* allocated SMRs */

	unsigned long			va_size;	/* input address size (stage 1) */
	unsigned long			ipa_size;	/* intermediate PA size */
	unsigned long			pa_size;	/* output address size */
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;		/* on arm_smmu_devices */
	struct rb_root			masters;	/* arm_smmu_master tree */

	u32				cavium_id_base; /* Specific to Cavium */
};
365 
/* Translation table format programmed into a context bank */
enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

/* Per-domain context bank configuration */
struct arm_smmu_cfg {
	u8				cbndx;		/* context bank index */
	u8				irptndx;	/* context interrupt index */
	u32				cbar;		/* CBAR type/attribute bits */
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

/*
 * ASIDs and VMIDs are derived from the context bank index, offset by
 * cavium_id_base (non-zero only on Cavium parts) so that multiple SMMU
 * instances sharing a TLB namespace do not alias each other.
 */
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
383 
/* Which translation stage(s) a domain uses */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;		/* NULL until first attach */
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;	/* serialises page-table ops */
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;		/* embedded generic domain */
};

/* Decoded "mmu-masters" phandle specifier: target node plus stream IDs */
struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};
405 
/* Global list of probed SMMU instances, protected by the spinlock */
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

/* Maps a DT property name onto an ARM_SMMU_OPT_* flag */
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

/* presumably allocates disjoint Cavium ASID/VMID ranges per SMMU -- confirm at use site */
static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

/* Terminated by the { 0, NULL } sentinel entry */
static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
420 
/* Convert a generic iommu_domain into its containing arm_smmu_domain. */
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
425 
426 static void parse_driver_options(struct arm_smmu_device *smmu)
427 {
428         int i = 0;
429 
430         do {
431                 if (of_property_read_bool(smmu->dev->of_node,
432                                                 arm_smmu_options[i].prop)) {
433                         smmu->options |= arm_smmu_options[i].opt;
434                         dev_notice(smmu->dev, "option %s\n",
435                                 arm_smmu_options[i].prop);
436                 }
437         } while (arm_smmu_options[++i].opt);
438 }
439 
440 static struct device_node *dev_get_dev_node(struct device *dev)
441 {
442         if (dev_is_pci(dev)) {
443                 struct pci_bus *bus = to_pci_dev(dev)->bus;
444 
445                 while (!pci_is_root_bus(bus))
446                         bus = bus->parent;
447                 return bus->bridge->parent->of_node;
448         }
449 
450         return dev->of_node;
451 }
452 
453 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
454                                                 struct device_node *dev_node)
455 {
456         struct rb_node *node = smmu->masters.rb_node;
457 
458         while (node) {
459                 struct arm_smmu_master *master;
460 
461                 master = container_of(node, struct arm_smmu_master, node);
462 
463                 if (dev_node < master->of_node)
464                         node = node->rb_left;
465                 else if (dev_node > master->of_node)
466                         node = node->rb_right;
467                 else
468                         return master;
469         }
470 
471         return NULL;
472 }
473 
474 static struct arm_smmu_master_cfg *
475 find_smmu_master_cfg(struct device *dev)
476 {
477         struct arm_smmu_master_cfg *cfg = NULL;
478         struct iommu_group *group = iommu_group_get(dev);
479 
480         if (group) {
481                 cfg = iommu_group_get_iommudata(group);
482                 iommu_group_put(group);
483         }
484 
485         return cfg;
486 }
487 
/*
 * Insert @master into @smmu's rb-tree of masters, keyed by the of_node
 * pointer value.  Standard rb-tree insertion: descend to a leaf slot,
 * link the new node, then rebalance.  Returns -EEXIST if a master with
 * the same of_node is already registered.
 */
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}
512 
/*
 * Register one DT-described master (decoded into @masterspec) with @smmu.
 *
 * Rejects duplicate registrations (-EBUSY), specifiers with more stream
 * IDs than MAX_MASTER_STREAMIDS (-ENOSPC), and — on stream-indexing
 * SMMUs — stream IDs beyond the number of mapping groups (-ERANGE).
 * The master struct is devm-allocated against @dev, so no explicit
 * free is needed on the later insert_smmu_master() failure path.
 */
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		/* Stream-indexing SMMUs use the ID directly as an index */
		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}
556 
557 static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
558 {
559         struct arm_smmu_device *smmu;
560         struct arm_smmu_master *master = NULL;
561         struct device_node *dev_node = dev_get_dev_node(dev);
562 
563         spin_lock(&arm_smmu_devices_lock);
564         list_for_each_entry(smmu, &arm_smmu_devices, list) {
565                 master = find_smmu_master(smmu, dev_node);
566                 if (master)
567                         break;
568         }
569         spin_unlock(&arm_smmu_devices_lock);
570 
571         return master ? smmu : NULL;
572 }
573 
/*
 * Atomically claim a free bit in [start, end) of @map.  The lookup is
 * lock-free: if another CPU wins the race for the bit found by
 * find_next_zero_bit(), test_and_set_bit() observes it set and we
 * simply search again.  Returns the claimed index, or -ENOSPC when no
 * zero bit remains.
 */
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
586 
/* Release a bit previously claimed by __arm_smmu_alloc_bitmap(). */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
591 
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Issue a global sync, then poll GSACTIVE until it drains */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		/* ~1s worth of 1us delays before giving up */
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
610 
/* io-pgtable tlb_sync callback: @cookie is the arm_smmu_domain */
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
616 
/*
 * io-pgtable tlb_flush_all callback: invalidate all TLB entries for the
 * domain's context.  Stage 1 invalidates by ASID via the context bank;
 * stage 2 invalidates by VMID via the global TLBIVMID register.  Both
 * paths finish with a TLB sync.
 */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
637 
638 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
639                                           size_t granule, bool leaf, void *cookie)
640 {
641         struct arm_smmu_domain *smmu_domain = cookie;
642         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
643         struct arm_smmu_device *smmu = smmu_domain->smmu;
644         bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
645         void __iomem *reg;
646 
647         if (stage1) {
648                 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
649                 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
650 
651                 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
652                         iova &= ~12UL;
653                         iova |= ARM_SMMU_CB_ASID(smmu, cfg);
654                         do {
655                                 writel_relaxed(iova, reg);
656                                 iova += granule;
657                         } while (size -= granule);
658                 } else {
659                         iova >>= 12;
660                         iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
661                         do {
662                                 writeq_relaxed(iova, reg);
663                                 iova += granule >> 12;
664                         } while (size -= granule);
665                 }
666         } else if (smmu->version == ARM_SMMU_V2) {
667                 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
668                 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
669                               ARM_SMMU_CB_S2_TLBIIPAS2;
670                 iova >>= 12;
671                 do {
672                         smmu_write_atomic_lq(iova, reg);
673                         iova += granule >> 12;
674                 } while (size -= granule);
675         } else {
676                 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
677                 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
678         }
679 }
680 
/* TLB maintenance callbacks handed to the io-pgtable code */
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
686 
/*
 * Context-bank fault IRQ handler.  @dev is the iommu_domain registered
 * with the interrupt.  Reads the fault status/syndrome/address from the
 * context bank, reports the fault, and writes FSR back to itself to
 * clear the recorded fault (write-one-to-clear; non-relaxed writel so
 * the clear is ordered after the diagnostics reads).
 */
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	/* Not our fault: let any shared-IRQ sibling have a look */
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}
713 
/*
 * Global fault IRQ handler.  @dev is the arm_smmu_device.  Reads the
 * global fault status and syndrome registers (through the secure alias
 * when ARM_SMMU_OPT_SECURE_CFG_ACCESS is set), logs them, and writes
 * GFSR back to itself to acknowledge the fault.
 */
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	/* No fault recorded: not our interrupt */
	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
737 
/*
 * Program the context bank selected by smmu_domain->cfg with the page
 * table configuration produced by io-pgtable: CBA2R/CBAR attributes,
 * TTBRs, TTBCR(2), MAIRs (stage 1 only), and finally SCTLR to enable
 * translation.  The register write order follows the architecture's
 * expectation that the bank is fully configured before SCTLR.M is set.
 */
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* CBA2R (SMMUv2 only): register width and, optionally, 16-bit VMID */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs: stage 1 carries the ASID in TTBRn[63:48], stage 2 uses VTTBR */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR: low 32 bits of the io-pgtable TCR; TTBCR2 on SMMUv2 */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR: enable faults and translation; big-endian walks if built BE */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
828 
/*
 * Finalise a domain against a particular SMMU instance: choose a
 * translation stage and context format, allocate a context bank (plus,
 * on SMMUv1, a context IRQ), build the io-pgtable and program the
 * context bank registers.
 *
 * Serialised by smmu_domain->init_mutex; returns 0 on success (including
 * the already-initialised case) or a negative errno.
 */
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;	/* input/output address sizes, in bits */
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	/* No usable context format: give up */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		/* Stage-1 contexts live above the stage-2 context banks */
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			/* Clamp to what the AArch32 LPAE format can express */
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Claim a context bank in the range appropriate to the stage */
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		/* SMMUv1: context IRQs come from a shared pool, round-robin */
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		/* SMMUv2: one IRQ per context bank */
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		/* Non-fatal: carry on without context fault reporting */
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
990 
/*
 * Undo arm_smmu_init_domain_context(): disable the context bank, release
 * its fault IRQ and page tables, and return the bank to the allocator.
 * A no-op for uninitialised and DMA (bypass) domains, which never
 * allocated a context bank.
 */
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);	/* clear SCTLR */

	/* irptndx is INVALID_IRPTNDX when the IRQ request failed at init */
	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
1017 
1018 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
1019 {
1020         struct arm_smmu_domain *smmu_domain;
1021 
1022         if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
1023                 return NULL;
1024         /*
1025          * Allocate the domain and initialise some of its data structures.
1026          * We can't really do anything meaningful until we've added a
1027          * master.
1028          */
1029         smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1030         if (!smmu_domain)
1031                 return NULL;
1032 
1033         if (type == IOMMU_DOMAIN_DMA &&
1034             iommu_get_dma_cookie(&smmu_domain->domain)) {
1035                 kfree(smmu_domain);
1036                 return NULL;
1037         }
1038 
1039         mutex_init(&smmu_domain->init_mutex);
1040         spin_lock_init(&smmu_domain->pgtbl_lock);
1041 
1042         return &smmu_domain->domain;
1043 }
1044 
/*
 * Release a domain and everything hanging off it. All devices are
 * assumed to have been detached already.
 */
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(to_smmu_domain(domain));
}
1057 
1058 static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
1059                                           struct arm_smmu_master_cfg *cfg)
1060 {
1061         int i;
1062         struct arm_smmu_smr *smrs;
1063         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1064 
1065         if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1066                 return 0;
1067 
1068         if (cfg->smrs)
1069                 return -EEXIST;
1070 
1071         smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
1072         if (!smrs) {
1073                 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1074                         cfg->num_streamids);
1075                 return -ENOMEM;
1076         }
1077 
1078         /* Allocate the SMRs on the SMMU */
1079         for (i = 0; i < cfg->num_streamids; ++i) {
1080                 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1081                                                   smmu->num_mapping_groups);
1082                 if (idx < 0) {
1083                         dev_err(smmu->dev, "failed to allocate free SMR\n");
1084                         goto err_free_smrs;
1085                 }
1086 
1087                 smrs[i] = (struct arm_smmu_smr) {
1088                         .idx    = idx,
1089                         .mask   = 0, /* We don't currently share SMRs */
1090                         .id     = cfg->streamids[i],
1091                 };
1092         }
1093 
1094         /* It worked! Now, poke the actual hardware */
1095         for (i = 0; i < cfg->num_streamids; ++i) {
1096                 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1097                           smrs[i].mask << SMR_MASK_SHIFT;
1098                 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1099         }
1100 
1101         cfg->smrs = smrs;
1102         return 0;
1103 
1104 err_free_smrs:
1105         while (--i >= 0)
1106                 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1107         kfree(smrs);
1108         return -ENOSPC;
1109 }
1110 
1111 static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
1112                                       struct arm_smmu_master_cfg *cfg)
1113 {
1114         int i;
1115         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1116         struct arm_smmu_smr *smrs = cfg->smrs;
1117 
1118         if (!smrs)
1119                 return;
1120 
1121         /* Invalidate the SMRs before freeing back to the allocator */
1122         for (i = 0; i < cfg->num_streamids; ++i) {
1123                 u8 idx = smrs[i].idx;
1124 
1125                 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1126                 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1127         }
1128 
1129         cfg->smrs = NULL;
1130         kfree(smrs);
1131 }
1132 
1133 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1134                                       struct arm_smmu_master_cfg *cfg)
1135 {
1136         int i, ret;
1137         struct arm_smmu_device *smmu = smmu_domain->smmu;
1138         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1139 
1140         /*
1141          * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1142          * for all devices behind the SMMU. Note that we need to take
1143          * care configuring SMRs for devices both a platform_device and
1144          * and a PCI device (i.e. a PCI host controller)
1145          */
1146         if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1147                 return 0;
1148 
1149         /* Devices in an IOMMU group may already be configured */
1150         ret = arm_smmu_master_configure_smrs(smmu, cfg);
1151         if (ret)
1152                 return ret == -EEXIST ? 0 : ret;
1153 
1154         for (i = 0; i < cfg->num_streamids; ++i) {
1155                 u32 idx, s2cr;
1156 
1157                 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
1158                 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
1159                        (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
1160                 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1161         }
1162 
1163         return 0;
1164 }
1165 
1166 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
1167                                           struct arm_smmu_master_cfg *cfg)
1168 {
1169         int i;
1170         struct arm_smmu_device *smmu = smmu_domain->smmu;
1171         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1172 
1173         /* An IOMMU group is torn down by the first device to be removed */
1174         if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1175                 return;
1176 
1177         /*
1178          * We *must* clear the S2CR first, because freeing the SMR means
1179          * that it can be re-allocated immediately.
1180          */
1181         for (i = 0; i < cfg->num_streamids; ++i) {
1182                 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
1183                 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
1184 
1185                 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1186         }
1187 
1188         arm_smmu_master_free_smrs(smmu, cfg);
1189 }
1190 
1191 static void arm_smmu_detach_dev(struct device *dev,
1192                                 struct arm_smmu_master_cfg *cfg)
1193 {
1194         struct iommu_domain *domain = dev->archdata.iommu;
1195         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1196 
1197         dev->archdata.iommu = NULL;
1198         arm_smmu_domain_remove_master(smmu_domain, cfg);
1199 }
1200 
1201 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1202 {
1203         int ret;
1204         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1205         struct arm_smmu_device *smmu;
1206         struct arm_smmu_master_cfg *cfg;
1207 
1208         smmu = find_smmu_for_device(dev);
1209         if (!smmu) {
1210                 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1211                 return -ENXIO;
1212         }
1213 
1214         /* Ensure that the domain is finalised */
1215         ret = arm_smmu_init_domain_context(domain, smmu);
1216         if (ret < 0)
1217                 return ret;
1218 
1219         /*
1220          * Sanity check the domain. We don't support domains across
1221          * different SMMUs.
1222          */
1223         if (smmu_domain->smmu != smmu) {
1224                 dev_err(dev,
1225                         "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1226                         dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1227                 return -EINVAL;
1228         }
1229 
1230         /* Looks ok, so add the device to the domain */
1231         cfg = find_smmu_master_cfg(dev);
1232         if (!cfg)
1233                 return -ENODEV;
1234 
1235         /* Detach the dev from its current domain */
1236         if (dev->archdata.iommu)
1237                 arm_smmu_detach_dev(dev, cfg);
1238 
1239         ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1240         if (!ret)
1241                 dev->archdata.iommu = domain;
1242         return ret;
1243 }
1244 
1245 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1246                         phys_addr_t paddr, size_t size, int prot)
1247 {
1248         int ret;
1249         unsigned long flags;
1250         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1251         struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1252 
1253         if (!ops)
1254                 return -ENODEV;
1255 
1256         spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1257         ret = ops->map(ops, iova, paddr, size, prot);
1258         spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1259         return ret;
1260 }
1261 
1262 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1263                              size_t size)
1264 {
1265         size_t ret;
1266         unsigned long flags;
1267         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1268         struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1269 
1270         if (!ops)
1271                 return 0;
1272 
1273         spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1274         ret = ops->unmap(ops, iova, size);
1275         spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1276         return ret;
1277 }
1278 
/*
 * Resolve an IOVA to a physical address using the hardware ATS1PR
 * translation operation, falling back to a software table walk if the
 * hardware doesn't complete within the poll window.
 *
 * Called under smmu_domain->pgtbl_lock (see arm_smmu_iova_to_phys).
 * Returns 0 on a translation fault.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;	/* page-align; the offset is reapplied below */
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	/* Poll ATSR.ACTIVE (5us steps, up to 50us) for completion */
	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {	/* fault flag set: translation failed */
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	/* PAR address bits [39:12], plus the page offset from the IOVA */
	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
1318 
1319 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1320                                         dma_addr_t iova)
1321 {
1322         phys_addr_t ret;
1323         unsigned long flags;
1324         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1325         struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1326 
1327         if (!ops)
1328                 return 0;
1329 
1330         spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1331         if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1332                         smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1333                 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1334         } else {
1335                 ret = ops->iova_to_phys(ops, iova);
1336         }
1337 
1338         spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1339 
1340         return ret;
1341 }
1342 
1343 static bool arm_smmu_capable(enum iommu_cap cap)
1344 {
1345         switch (cap) {
1346         case IOMMU_CAP_CACHE_COHERENCY:
1347                 /*
1348                  * Return true here as the SMMU can always send out coherent
1349                  * requests.
1350                  */
1351                 return true;
1352         case IOMMU_CAP_INTR_REMAP:
1353                 return true; /* MSIs are just memory writes */
1354         case IOMMU_CAP_NOEXEC:
1355                 return true;
1356         default:
1357                 return false;
1358         }
1359 }
1360 
1361 static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1362 {
1363         *((u16 *)data) = alias;
1364         return 0; /* Continue walking */
1365 }
1366 
/* Destructor for group iommudata allocated in arm_smmu_init_pci_device() */
static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
1371 
1372 static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1373                                     struct iommu_group *group)
1374 {
1375         struct arm_smmu_master_cfg *cfg;
1376         u16 sid;
1377         int i;
1378 
1379         cfg = iommu_group_get_iommudata(group);
1380         if (!cfg) {
1381                 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1382                 if (!cfg)
1383                         return -ENOMEM;
1384 
1385                 iommu_group_set_iommudata(group, cfg,
1386                                           __arm_smmu_release_pci_iommudata);
1387         }
1388 
1389         if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1390                 return -ENOSPC;
1391 
1392         /*
1393          * Assume Stream ID == Requester ID for now.
1394          * We need a way to describe the ID mappings in FDT.
1395          */
1396         pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1397         for (i = 0; i < cfg->num_streamids; ++i)
1398                 if (cfg->streamids[i] == sid)
1399                         break;
1400 
1401         /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1402         if (i == cfg->num_streamids)
1403                 cfg->streamids[cfg->num_streamids++] = sid;
1404 
1405         return 0;
1406 }
1407 
1408 static int arm_smmu_init_platform_device(struct device *dev,
1409                                          struct iommu_group *group)
1410 {
1411         struct arm_smmu_device *smmu = find_smmu_for_device(dev);
1412         struct arm_smmu_master *master;
1413 
1414         if (!smmu)
1415                 return -ENODEV;
1416 
1417         master = find_smmu_master(smmu, dev->of_node);
1418         if (!master)
1419                 return -ENODEV;
1420 
1421         iommu_group_set_iommudata(group, &master->cfg, NULL);
1422 
1423         return 0;
1424 }
1425 
/*
 * Add a device to the IOMMU: ensure it has a group (which triggers
 * arm_smmu_device_group() and the per-bus init paths), then drop the
 * group reference we were handed.
 */
static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}
1437 
/* Counterpart to arm_smmu_add_device(): take the device out of its group */
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
1442 
/*
 * Find or create the IOMMU group for a device and run the appropriate
 * per-bus (PCI or platform) initialisation on it.
 */
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = dev_is_pci(dev) ? pci_device_group(dev)
				: generic_device_group(dev);
	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (!ret)
		return group;

	iommu_group_put(group);
	return ERR_PTR(ret);
}
1468 
1469 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1470                                     enum iommu_attr attr, void *data)
1471 {
1472         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1473 
1474         switch (attr) {
1475         case DOMAIN_ATTR_NESTING:
1476                 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1477                 return 0;
1478         default:
1479                 return -ENODEV;
1480         }
1481 }
1482 
1483 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1484                                     enum iommu_attr attr, void *data)
1485 {
1486         int ret = 0;
1487         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1488 
1489         mutex_lock(&smmu_domain->init_mutex);
1490 
1491         switch (attr) {
1492         case DOMAIN_ATTR_NESTING:
1493                 if (smmu_domain->smmu) {
1494                         ret = -EPERM;
1495                         goto out_unlock;
1496                 }
1497 
1498                 if (*(int *)data)
1499                         smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1500                 else
1501                         smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1502 
1503                 break;
1504         default:
1505                 ret = -ENODEV;
1506         }
1507 
1508 out_unlock:
1509         mutex_unlock(&smmu_domain->init_mutex);
1510         return ret;
1511 }
1512 
/* Callbacks exposed to the IOMMU core */
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
1529 
/*
 * Bring the SMMU to a known state: clear stale fault status, invalidate
 * all SMRs and TLBs, disable every context bank, apply MMU-500 errata
 * workarounds, and finally enable the SMMU via sCR0.
 */
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);	/* drain invalidations before enabling */
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
1609 
/*
 * Decode an ID-register address-size field (e.g. ID2.IAS/OAS/UBS) into an
 * address width in bits.  Encodings beyond 5 are reserved; treat them, and
 * any out-of-range value, as the maximum of 48 bits.
 */
static int arm_smmu_id_size_to_bits(int size)
{
	static const int id_size_bits[] = { 32, 36, 40, 42, 44, 48 };

	if (size < 0 || size > 5)
		return 48;

	return id_size_bits[size];
}
1628 
/*
 * Probe the SMMU's ID registers (ID0-ID2) and fill in @smmu with the
 * hardware's capabilities: supported translation stages, stream matching
 * vs. indexing, context bank counts, address sizes and page-table formats.
 *
 * Returns 0 on success, or -ENODEV if the configuration is unusable
 * (no translation stages, stream matching with zero SMRs, insufficient
 * SMR mask bits, or more S2 context banks than total context banks).
 */
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	/* An SMMU with neither stage is useless to us */
	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	/*
	 * Hardware address-translation ops are only advertised for stage-1
	 * capable SMMUs, and on v2 only when the ATOSNS restriction is not
	 * set.  NOTE(review): polarity of ID0_ATOSNS assumed from the
	 * condition below — confirm against the SMMUv2 spec.
	 */
	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * Write all ID and MASK bits to SMR0 and read the value back
		 * to discover which bits the hardware actually implements.
		 */
		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		/* The mask must be able to cover every implemented ID bit */
		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		/* Stream indexing: group count comes from NUMSIDB instead */
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		/* v1 has no UBS field: input size mirrors the IPA size */
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	/*
	 * The global ops advertise the union of all probed SMMUs' page
	 * sizes; -1UL is the "not yet initialised" sentinel.
	 */
	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);


	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
1830 
/*
 * Per-compatible probe data: the architecture version the implementation
 * conforms to, plus an implementation identifier used to select quirks
 * (e.g. MMU-500 errata workarounds, Cavium ASID/VMID offsetting).
 */
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

/* Define a named version/implementation pair for use in the OF match table. */
#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
1844 
/* Devicetree compatibles handled by this driver, with their probe data. */
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1855 
/*
 * Platform-driver probe: map the SMMU's registers, collect its interrupts,
 * probe the hardware configuration, register the "mmu-masters" listed in
 * the devicetree, wire up the global fault handlers, add the device to the
 * global SMMU list and finally reset the hardware into a known state.
 *
 * Returns 0 on success or a negative errno; on failure, references taken
 * on master device nodes are dropped via the out_put_masters path.
 */
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Pick up version/model quirk data matched from the compatible */
	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	/* Recorded so cfg_probe can sanity-check against ID1's size fields */
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	/*
	 * Count all IRQ resources; everything beyond the declared global
	 * interrupts is treated as a context-bank interrupt.
	 */
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	/*
	 * Walk the legacy "mmu-masters" phandle list, registering each
	 * master and its stream IDs.  The reference taken by of_node_get()
	 * here is dropped either in out_put_masters or at device removal.
	 */
	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	parse_driver_options(smmu);

	/* SMMUv2 requires one dedicated interrupt per context bank */
	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	/* Publish the device on the global list before resetting it */
	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	/* Drop the of_node references taken for each registered master */
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
1998 
/*
 * Platform-driver remove: detach the SMMU from the global device list,
 * release master node references and global IRQs, then disable client
 * access in hardware.  Returns -ENODEV if the device was never on the
 * list (i.e. probe did not complete).
 */
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	/* Find and unlink our entry under the same lock used by probe */
	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	/* Drop the of_node references taken when masters were registered */
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	/* Warn (but proceed) if any context bank is still allocated */
	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
2035 
/* Platform driver glue: bind to the compatibles in arm_smmu_of_match. */
static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
2044 
2045 static int __init arm_smmu_init(void)
2046 {
2047         struct device_node *np;
2048         int ret;
2049 
2050         /*
2051          * Play nice with systems that don't have an ARM SMMU by checking that
2052          * an ARM SMMU exists in the system before proceeding with the driver
2053          * and IOMMU bus operation registration.
2054          */
2055         np = of_find_matching_node(NULL, arm_smmu_of_match);
2056         if (!np)
2057                 return 0;
2058 
2059         of_node_put(np);
2060 
2061         ret = platform_driver_register(&arm_smmu_driver);
2062         if (ret)
2063                 return ret;
2064 
2065         /* Oh, for a proper bus abstraction */
2066         if (!iommu_present(&platform_bus_type))
2067                 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2068 
2069 #ifdef CONFIG_ARM_AMBA
2070         if (!iommu_present(&amba_bustype))
2071                 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2072 #endif
2073 
2074 #ifdef CONFIG_PCI
2075         if (!iommu_present(&pci_bus_type)) {
2076                 pci_request_acs();
2077                 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2078         }
2079 #endif
2080 
2081         return 0;
2082 }
2083 
2084 static void __exit arm_smmu_exit(void)
2085 {
2086         return platform_driver_unregister(&arm_smmu_driver);
2087 }
2088 
/*
 * Register at subsys_initcall time so the SMMU driver is available before
 * the devices sitting behind it probe.
 */
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
2095 

/* (LXR page-generation footer removed — not part of the kernel source file.) */