
arch/x86/kernel/amd_nb.c

/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

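/* GART flush words read at init by amd_cache_gart(), one per northbridge. */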
static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        {}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        {}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

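/*
 * Scan the PCI device list for the next device matching @ids. Pass the
 * previous hit in @dev to continue a scan, or NULL to start from the top.
 */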
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

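/*
 * Count the NB misc (function 3) devices, allocate the amd_northbridges
 * array, pair up each node's misc and link devices, and set the feature
 * flags (GART, L3 index disable, L3 partitioning) from the CPU family.
 */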
int amd_cache_northbridges(void)
{
        u16 i = 0;
        struct amd_northbridge *nb;
        struct pci_dev *misc, *link;

        if (amd_nb_num())
                return 0;

        misc = NULL;
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;

        if (i == 0)
                return 0;

        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = i;

        link = misc = NULL;
        for (i = 0; i != amd_nb_num(); i++) {
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, amd_nb_link_ids);
        }

        /* GART is present on K8 and Fam10h; on Fam15h only up to model 0fh */
        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
            (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
                amd_northbridges.flags |= AMD_NB_GART;

        /* Check for L3 cache presence. */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Match a packed vendor/device word (vendor in the low 16 bits, device in
 * the high 16) against the NB misc IDs. Ignores subdevice/subvendor, but
 * as far as I can figure out they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

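/*
 * Fill @res with the MMCONFIG aperture from MSR_FAM10H_MMIO_CONF_BASE.
 * The BUSRANGE field encodes log2 of the number of buses covered, and
 * each bus needs 1 MB of config space (32 devs * 8 fns * 4 KB), hence
 * the aperture size of 1 << (segn_busn_bits + 20) bytes.
 */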
struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return NULL;

        /* assume all cpus from fam10h have mmconfig */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconfig is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}

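/*
 * Return the 4-bit subcache mask for @cpu's compute unit, read from the
 * L3 partitioning register (0x1d4) of the node's link (function 4) device.
 */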
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        cuid = cpu_data(cpu).compute_unit_id;
        return (mask >> (4 * cuid)) & 0xf;
}

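/*
 * Enable the subcaches in @mask for @cpu's compute unit via the L3
 * partitioning register. BAN mode (bits 19-20 of reg 0x1b8) is turned
 * off while any subcaches are disabled, and restored once partitioning
 * is back at its reset state.
 */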
int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).compute_unit_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

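/*
 * Cache the GART flush word (reg 0x9c of each misc device) so that
 * amd_flush_garts() can rewrite it with the flush bit set.
 */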
static int amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                return -ENOMEM;
        }

        for (i = 0; i != amd_nb_num(); i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                      &flush_words[i]);

        return 0;
}

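/*
 * Flush every northbridge's GART TLB by setting the flush bit (bit 0)
 * in each cached flush word, then spin until the hardware clears it.
 */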
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_nb_num(); i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_nb_num(); i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

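/*
 * Enumerate the northbridges and cache the GART flush words at boot.
 */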
static __init int init_amd_nbs(void)
{
        int err = 0;

        err = amd_cache_northbridges();

        if (err < 0)
                pr_notice("Cannot enumerate AMD northbridges\n");

        if (amd_cache_gart() < 0)
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");

        return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
