
Linux/drivers/cpufreq/acpi-cpufreq.c

  1 /*
  2  * acpi-cpufreq.c - ACPI Processor P-States Driver
  3  *
  4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  6  *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
  7  *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
  8  *
  9  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 10  *
 11  *  This program is free software; you can redistribute it and/or modify
 12  *  it under the terms of the GNU General Public License as published by
 13  *  the Free Software Foundation; either version 2 of the License, or (at
 14  *  your option) any later version.
 15  *
 16  *  This program is distributed in the hope that it will be useful, but
 17  *  WITHOUT ANY WARRANTY; without even the implied warranty of
 18  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 19  *  General Public License for more details.
 20  *
 21  *  You should have received a copy of the GNU General Public License along
 22  *  with this program; if not, write to the Free Software Foundation, Inc.,
 23  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 24  *
 25  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 26  */
 27 
 28 #include <linux/kernel.h>
 29 #include <linux/module.h>
 30 #include <linux/init.h>
 31 #include <linux/smp.h>
 32 #include <linux/sched.h>
 33 #include <linux/cpufreq.h>
 34 #include <linux/compiler.h>
 35 #include <linux/dmi.h>
 36 #include <linux/slab.h>
 37 
 38 #include <linux/acpi.h>
 39 #include <linux/io.h>
 40 #include <linux/delay.h>
 41 #include <linux/uaccess.h>
 42 
 43 #include <acpi/processor.h>
 44 
 45 #include <asm/msr.h>
 46 #include <asm/processor.h>
 47 #include <asm/cpufeature.h>
 48 
 49 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 50 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 51 MODULE_LICENSE("GPL");
 52 
 53 #define PFX "acpi-cpufreq: "
 54 
 55 enum {
 56         UNDEFINED_CAPABLE = 0,
 57         SYSTEM_INTEL_MSR_CAPABLE,
 58         SYSTEM_AMD_MSR_CAPABLE,
 59         SYSTEM_IO_CAPABLE,
 60 };
 61 
 62 #define INTEL_MSR_RANGE         (0xffff)
 63 #define AMD_MSR_RANGE           (0x7)
 64 
 65 #define MSR_K7_HWCR_CPB_DIS     (1ULL << 25)
 66 
 67 struct acpi_cpufreq_data {
 68         struct cpufreq_frequency_table *freq_table;
 69         unsigned int resume;
 70         unsigned int cpu_feature;
 71         unsigned int acpi_perf_cpu;
 72         cpumask_var_t freqdomain_cpus;
 73         void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
 74         u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
 75 };
 76 
 77 /* acpi_perf_data is a pointer to percpu data. */
 78 static struct acpi_processor_performance __percpu *acpi_perf_data;
 79 
 80 static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
 81 {
 82         return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
 83 }
 84 
 85 static struct cpufreq_driver acpi_cpufreq_driver;
 86 
 87 static unsigned int acpi_pstate_strict;
 88 static struct msr __percpu *msrs;
 89 
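     /*
      * Return true if boost (Intel turbo / AMD core performance boost) is
      * currently enabled on @cpu, i.e. the vendor-specific boost-disable
      * MSR bit is clear.
      */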
 90 static bool boost_state(unsigned int cpu)
 91 {
 92         u32 lo, hi;
 93         u64 msr;
 94 
 95         switch (boot_cpu_data.x86_vendor) {
 96         case X86_VENDOR_INTEL:
 97                 rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
 98                 msr = lo | ((u64)hi << 32);
 99                 return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
100         case X86_VENDOR_AMD:
101                 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
102                 msr = lo | ((u64)hi << 32);
103                 return !(msr & MSR_K7_HWCR_CPB_DIS);
104         }
105         return false;
106 }
107 
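     /*
      * Set or clear the boost-disable MSR bit on every CPU in @cpumask.
      * The MSRs are read in bulk, modified, and written back via
      * rdmsr_on_cpus()/wrmsr_on_cpus().
      */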
108 static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
109 {
110         u32 cpu;
111         u32 msr_addr;
112         u64 msr_mask;
113 
114         switch (boot_cpu_data.x86_vendor) {
115         case X86_VENDOR_INTEL:
116                 msr_addr = MSR_IA32_MISC_ENABLE;
117                 msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
118                 break;
119         case X86_VENDOR_AMD:
120                 msr_addr = MSR_K7_HWCR;
121                 msr_mask = MSR_K7_HWCR_CPB_DIS;
122                 break;
123         default:
124                 return;
125         }
126 
127         rdmsr_on_cpus(cpumask, msr_addr, msrs);
128 
129         for_each_cpu(cpu, cpumask) {
130                 struct msr *reg = per_cpu_ptr(msrs, cpu);
131                 if (enable)
132                         reg->q &= ~msr_mask;
133                 else
134                         reg->q |= msr_mask;
135         }
136 
137         wrmsr_on_cpus(cpumask, msr_addr, msrs);
138 }
139 
140 static int set_boost(int val)
141 {
142         get_online_cpus();
143         boost_set_msrs(val, cpu_online_mask);
144         put_online_cpus();
145         pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
146 
147         return 0;
148 }
149 
150 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
151 {
152         struct acpi_cpufreq_data *data = policy->driver_data;
153 
154         if (unlikely(!data))
155                 return -ENODEV;
156 
157         return cpufreq_show_cpus(data->freqdomain_cpus, buf);
158 }
159 
160 cpufreq_freq_attr_ro(freqdomain_cpus);
161 
162 #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
163 static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
164                          size_t count)
165 {
166         int ret;
167         unsigned int val = 0;
168 
169         if (!acpi_cpufreq_driver.set_boost)
170                 return -EINVAL;
171 
172         ret = kstrtouint(buf, 10, &val);
173         if (ret || val > 1)
174                 return -EINVAL;
175 
176         set_boost(val);
177 
178         return count;
179 }
180 
181 static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
182 {
183         return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
184 }
185 
186 cpufreq_freq_attr_rw(cpb);
187 #endif
188 
189 static int check_est_cpu(unsigned int cpuid)
190 {
191         struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
192 
193         return cpu_has(cpu, X86_FEATURE_EST);
194 }
195 
196 static int check_amd_hwpstate_cpu(unsigned int cpuid)
197 {
198         struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
199 
200         return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
201 }
202 
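     /* Map a raw status-port value back to a frequency from the driver's table. */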
203 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
204 {
205         struct acpi_processor_performance *perf;
206         int i;
207 
208         perf = to_perf_data(data);
209 
210         for (i = 0; i < perf->state_count; i++) {
211                 if (value == perf->states[i].status)
212                         return data->freq_table[i].frequency;
213         }
214         return 0;
215 }
216 
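     /*
      * Map a PERF_CTL/PERF_STATUS MSR value to a table frequency.  Only the
      * vendor-specific low bits of the MSR identify the P-state; if no state
      * matches, fall back to the first table entry.
      */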
217 static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
218 {
219         struct cpufreq_frequency_table *pos;
220         struct acpi_processor_performance *perf;
221 
222         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
223                 msr &= AMD_MSR_RANGE;
224         else
225                 msr &= INTEL_MSR_RANGE;
226 
227         perf = to_perf_data(data);
228 
229         cpufreq_for_each_entry(pos, data->freq_table)
230                 if (msr == perf->states[pos->driver_data].status)
231                         return pos->frequency;
232         return data->freq_table[0].frequency;
233 }
234 
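     /* Decode a raw control/status value according to the access method in use. */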
235 static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
236 {
237         switch (data->cpu_feature) {
238         case SYSTEM_INTEL_MSR_CAPABLE:
239         case SYSTEM_AMD_MSR_CAPABLE:
240                 return extract_msr(val, data);
241         case SYSTEM_IO_CAPABLE:
242                 return extract_io(val, data);
243         default:
244                 return 0;
245         }
246 }
247 
248 static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
249 {
250         u32 val, dummy;
251 
252         rdmsr(MSR_IA32_PERF_CTL, val, dummy);
253         return val;
254 }
255 
256 static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
257 {
258         u32 lo, hi;
259 
260         rdmsr(MSR_IA32_PERF_CTL, lo, hi);
261         lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
262         wrmsr(MSR_IA32_PERF_CTL, lo, hi);
263 }
264 
265 static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
266 {
267         u32 val, dummy;
268 
269         rdmsr(MSR_AMD_PERF_CTL, val, dummy);
270         return val;
271 }
272 
273 static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
274 {
275         wrmsr(MSR_AMD_PERF_CTL, val, 0);
276 }
277 
278 static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
279 {
280         u32 val;
281 
282         acpi_os_read_port(reg->address, &val, reg->bit_width);
283         return val;
284 }
285 
286 static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
287 {
288         acpi_os_write_port(reg->address, val, reg->bit_width);
289 }
290 
291 struct drv_cmd {
292         struct acpi_pct_register *reg;
293         u32 val;
294         union {
295                 void (*write)(struct acpi_pct_register *reg, u32 val);
296                 u32 (*read)(struct acpi_pct_register *reg);
297         } func;
298 };
299 
300 /* Called via smp_call_function_single(), on the target CPU */
301 static void do_drv_read(void *_cmd)
302 {
303         struct drv_cmd *cmd = _cmd;
304 
305         cmd->val = cmd->func.read(cmd->reg);
306 }
307 
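     /*
      * Read the P-state control register on one CPU from @mask, using the
      * per-driver read callback invoked through smp_call_function_any().
      */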
308 static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
309 {
310         struct acpi_processor_performance *perf = to_perf_data(data);
311         struct drv_cmd cmd = {
312                 .reg = &perf->control_register,
313                 .func.read = data->cpu_freq_read,
314         };
315         int err;
316 
317         err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
318         WARN_ON_ONCE(err);      /* smp_call_function_any() was buggy? */
319         return cmd.val;
320 }
321 
322 /* Called via smp_call_function_many(), on the target CPUs */
323 static void do_drv_write(void *_cmd)
324 {
325         struct drv_cmd *cmd = _cmd;
326 
327         cmd->func.write(cmd->reg, cmd->val);
328 }
329 
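     /*
      * Write @val to the P-state control register on every CPU in @mask,
      * running do_drv_write() locally if the current CPU is in the mask and
      * via IPI on the others.
      */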
330 static void drv_write(struct acpi_cpufreq_data *data,
331                       const struct cpumask *mask, u32 val)
332 {
333         struct acpi_processor_performance *perf = to_perf_data(data);
334         struct drv_cmd cmd = {
335                 .reg = &perf->control_register,
336                 .val = val,
337                 .func.write = data->cpu_freq_write,
338         };
339         int this_cpu;
340 
341         this_cpu = get_cpu();
342         if (cpumask_test_cpu(this_cpu, mask))
343                 do_drv_write(&cmd);
344 
345         smp_call_function_many(mask, do_drv_write, &cmd, 1);
346         put_cpu();
347 }
348 
349 static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
350 {
351         u32 val;
352 
353         if (unlikely(cpumask_empty(mask)))
354                 return 0;
355 
356         val = drv_read(data, mask);
357 
358         pr_debug("get_cur_val = %u\n", val);
359 
360         return val;
361 }
362 
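     /*
      * Read back the current frequency of @cpu (installed as the driver's
      * ->get() callback for the FIXED_HARDWARE case).  If it no longer
      * matches the cached P-state, flag the policy so the next target call
      * rewrites the registers.
      */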
363 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
364 {
365         struct acpi_cpufreq_data *data;
366         struct cpufreq_policy *policy;
367         unsigned int freq;
368         unsigned int cached_freq;
369 
370         pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
371 
372         policy = cpufreq_cpu_get_raw(cpu);
373         if (unlikely(!policy))
374                 return 0;
375 
376         data = policy->driver_data;
377         if (unlikely(!data || !data->freq_table))
378                 return 0;
379 
380         cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
381         freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
382         if (freq != cached_freq) {
383                 /*
384                  * The dreaded BIOS frequency change behind our back.
385                  * Force set the frequency on next target call.
386                  */
387                 data->resume = 1;
388         }
389 
390         pr_debug("cur freq = %u\n", freq);
391 
392         return freq;
393 }
394 
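     /*
      * Used in acpi_pstate_strict mode: poll the hardware up to 100 times,
      * 10us apart, until it reports @freq; returns 1 on success, 0 on
      * timeout.
      */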
395 static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
396                                 struct acpi_cpufreq_data *data)
397 {
398         unsigned int cur_freq;
399         unsigned int i;
400 
401         for (i = 0; i < 100; i++) {
402                 cur_freq = extract_freq(get_cur_val(mask, data), data);
403                 if (cur_freq == freq)
404                         return 1;
405                 udelay(10);
406         }
407         return 0;
408 }
409 
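     /*
      * ->target_index() callback: switch the policy's CPUs to the P-state
      * behind frequency table entry @index, skipping the register write if
      * that state is already current (unless a resume forced a rewrite).
      */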
410 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
411                                unsigned int index)
412 {
413         struct acpi_cpufreq_data *data = policy->driver_data;
414         struct acpi_processor_performance *perf;
415         const struct cpumask *mask;
416         unsigned int next_perf_state = 0; /* Index into perf table */
417         int result = 0;
418 
 419         if (unlikely(!data || !data->freq_table))
 420                 return -ENODEV;
 421 
422 
423         perf = to_perf_data(data);
424         next_perf_state = data->freq_table[index].driver_data;
425         if (perf->state == next_perf_state) {
426                 if (unlikely(data->resume)) {
427                         pr_debug("Called after resume, resetting to P%d\n",
428                                 next_perf_state);
429                         data->resume = 0;
430                 } else {
431                         pr_debug("Already at target state (P%d)\n",
432                                 next_perf_state);
433                         return 0;
434                 }
435         }
436 
437         /*
438          * The core won't allow CPUs to go away until the governor has been
439          * stopped, so we can rely on the stability of policy->cpus.
440          */
441         mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
442                 cpumask_of(policy->cpu) : policy->cpus;
443 
444         drv_write(data, mask, perf->states[next_perf_state].control);
445 
446         if (acpi_pstate_strict) {
447                 if (!check_freqs(mask, data->freq_table[index].frequency,
448                                         data)) {
449                         pr_debug("acpi_cpufreq_target failed (%d)\n",
450                                 policy->cpu);
451                         result = -EAGAIN;
452                 }
453         }
454 
455         if (!result)
456                 perf->state = next_perf_state;
457 
458         return result;
459 }
460 
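     /*
      * Guess the current frequency when it cannot be read back (SYSTEM_IO
      * case): pick the P-state whose frequency is closest to cpu_khz, or
      * assume P0 if cpu_khz is unknown.
      */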
461 static unsigned long
462 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
463 {
464         struct acpi_processor_performance *perf;
465 
466         perf = to_perf_data(data);
467         if (cpu_khz) {
468                 /* search the closest match to cpu_khz */
469                 unsigned int i;
470                 unsigned long freq;
471                 unsigned long freqn = perf->states[0].core_frequency * 1000;
472 
473                 for (i = 0; i < (perf->state_count-1); i++) {
474                         freq = freqn;
475                         freqn = perf->states[i+1].core_frequency * 1000;
476                         if ((2 * cpu_khz) > (freqn + freq)) {
477                                 perf->state = i;
478                                 return freq;
479                         }
480                 }
481                 perf->state = perf->state_count-1;
482                 return freqn;
483         } else {
484                 /* assume CPU is at P0... */
485                 perf->state = 0;
486                 return perf->states[0].core_frequency * 1000;
487         }
488 }
489 
490 static void free_acpi_perf_data(void)
491 {
492         unsigned int i;
493 
494         /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
495         for_each_possible_cpu(i)
496                 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
497                                  ->shared_cpu_map);
498         free_percpu(acpi_perf_data);
499 }
500 
501 static int boost_notify(struct notifier_block *nb, unsigned long action,
502                       void *hcpu)
503 {
504         unsigned cpu = (long)hcpu;
505         const struct cpumask *cpumask;
506 
507         cpumask = get_cpu_mask(cpu);
508 
509         /*
510          * Clear the boost-disable bit on the CPU_DOWN path so that
511          * this cpu cannot block the remaining ones from boosting. On
512          * the CPU_UP path we simply keep the boost-disable flag in
513          * sync with the current global state.
514          */
515 
516         switch (action) {
517         case CPU_DOWN_FAILED:
518         case CPU_DOWN_FAILED_FROZEN:
519         case CPU_ONLINE:
520         case CPU_ONLINE_FROZEN:
521                 boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
522                 break;
523 
524         case CPU_DOWN_PREPARE:
525         case CPU_DOWN_PREPARE_FROZEN:
526                 boost_set_msrs(1, cpumask);
527                 break;
528 
529         default:
530                 break;
531         }
532 
533         return NOTIFY_OK;
534 }
535 
536 
537 static struct notifier_block boost_nb = {
538         .notifier_call          = boost_notify,
539 };
540 
541 /*
542  * acpi_cpufreq_early_init - initialize ACPI P-States library
543  *
544  * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
545  * in order to determine correct frequency and voltage pairings. We can
546  * do _PDC and _PSD and find out the processor dependency for the
547  * actual init that will happen later...
548  */
549 static int __init acpi_cpufreq_early_init(void)
550 {
551         unsigned int i;
552         pr_debug("acpi_cpufreq_early_init\n");
553 
554         acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
555         if (!acpi_perf_data) {
556                 pr_debug("Memory allocation error for acpi_perf_data.\n");
557                 return -ENOMEM;
558         }
559         for_each_possible_cpu(i) {
560                 if (!zalloc_cpumask_var_node(
561                         &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
562                         GFP_KERNEL, cpu_to_node(i))) {
563 
564                         /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
565                         free_acpi_perf_data();
566                         return -ENOMEM;
567                 }
568         }
569 
570         /* Do initialization in ACPI core */
571         acpi_processor_preregister_performance(acpi_perf_data);
572         return 0;
573 }
574 
575 #ifdef CONFIG_SMP
576 /*
 577  * Some BIOSes do SW_ANY coordination internally, either setting it up in
 578  * hardware or handling it in firmware, without informing the OS. If this
 579  * goes undetected, the CPU may end up running at a different speed than
 580  * the OS intended. Detect it and handle it cleanly.
581  */
582 static int bios_with_sw_any_bug;
583 
584 static int sw_any_bug_found(const struct dmi_system_id *d)
585 {
586         bios_with_sw_any_bug = 1;
587         return 0;
588 }
589 
590 static const struct dmi_system_id sw_any_bug_dmi_table[] = {
591         {
592                 .callback = sw_any_bug_found,
593                 .ident = "Supermicro Server X6DLP",
594                 .matches = {
595                         DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
596                         DMI_MATCH(DMI_BIOS_VERSION, "080010"),
597                         DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
598                 },
599         },
600         { }
601 };
602 
603 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
604 {
605         /* Intel Xeon Processor 7100 Series Specification Update
606          * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
607          * AL30: A Machine Check Exception (MCE) Occurring during an
608          * Enhanced Intel SpeedStep Technology Ratio Change May Cause
609          * Both Processor Cores to Lock Up. */
610         if (c->x86_vendor == X86_VENDOR_INTEL) {
 611                 if ((c->x86 == 15) &&
 612                     (c->x86_model == 6) &&
 613                     (c->x86_mask == 8)) {
 614                         printk(KERN_INFO "acpi-cpufreq: Intel(R) "
 615                             "Xeon(R) 7100 Errata AL30, processors may "
 616                             "lock up on frequency changes: disabling "
 617                             "acpi-cpufreq.\n");
 618                         return -ENODEV;
 619                 }
 620         }
621         return 0;
622 }
623 #endif
624 
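     /*
      * ->init() callback: register the CPU with the ACPI performance library,
      * work out the coordination domain and the register access method (MSR
      * or I/O port), build the frequency table and initialise the policy.
      */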
625 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
626 {
627         unsigned int i;
628         unsigned int valid_states = 0;
629         unsigned int cpu = policy->cpu;
630         struct acpi_cpufreq_data *data;
631         unsigned int result = 0;
632         struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
633         struct acpi_processor_performance *perf;
634 #ifdef CONFIG_SMP
635         static int blacklisted;
636 #endif
637 
638         pr_debug("acpi_cpufreq_cpu_init\n");
639 
640 #ifdef CONFIG_SMP
641         if (blacklisted)
642                 return blacklisted;
643         blacklisted = acpi_cpufreq_blacklist(c);
644         if (blacklisted)
645                 return blacklisted;
646 #endif
647 
648         data = kzalloc(sizeof(*data), GFP_KERNEL);
649         if (!data)
650                 return -ENOMEM;
651 
652         if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
653                 result = -ENOMEM;
654                 goto err_free;
655         }
656 
657         perf = per_cpu_ptr(acpi_perf_data, cpu);
658         data->acpi_perf_cpu = cpu;
659         policy->driver_data = data;
660 
661         if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
662                 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
663 
664         result = acpi_processor_register_performance(perf, cpu);
665         if (result)
666                 goto err_free_mask;
667 
668         policy->shared_type = perf->shared_type;
669 
670         /*
671          * Will let policy->cpus know about dependency only when software
672          * coordination is required.
673          */
674         if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
675             policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
676                 cpumask_copy(policy->cpus, perf->shared_cpu_map);
677         }
678         cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
679 
680 #ifdef CONFIG_SMP
681         dmi_check_system(sw_any_bug_dmi_table);
682         if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
683                 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
684                 cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
685         }
686 
687         if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
688                 cpumask_clear(policy->cpus);
689                 cpumask_set_cpu(cpu, policy->cpus);
690                 cpumask_copy(data->freqdomain_cpus,
691                              topology_sibling_cpumask(cpu));
692                 policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
693                 pr_info_once(PFX "overriding BIOS provided _PSD data\n");
694         }
695 #endif
696 
697         /* capability check */
698         if (perf->state_count <= 1) {
699                 pr_debug("No P-States\n");
700                 result = -ENODEV;
701                 goto err_unreg;
702         }
703 
704         if (perf->control_register.space_id != perf->status_register.space_id) {
705                 result = -ENODEV;
706                 goto err_unreg;
707         }
708 
709         switch (perf->control_register.space_id) {
710         case ACPI_ADR_SPACE_SYSTEM_IO:
711                 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
712                     boot_cpu_data.x86 == 0xf) {
713                         pr_debug("AMD K8 systems must use native drivers.\n");
714                         result = -ENODEV;
715                         goto err_unreg;
716                 }
717                 pr_debug("SYSTEM IO addr space\n");
718                 data->cpu_feature = SYSTEM_IO_CAPABLE;
719                 data->cpu_freq_read = cpu_freq_read_io;
720                 data->cpu_freq_write = cpu_freq_write_io;
721                 break;
722         case ACPI_ADR_SPACE_FIXED_HARDWARE:
723                 pr_debug("HARDWARE addr space\n");
724                 if (check_est_cpu(cpu)) {
725                         data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
726                         data->cpu_freq_read = cpu_freq_read_intel;
727                         data->cpu_freq_write = cpu_freq_write_intel;
728                         break;
729                 }
730                 if (check_amd_hwpstate_cpu(cpu)) {
731                         data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
732                         data->cpu_freq_read = cpu_freq_read_amd;
733                         data->cpu_freq_write = cpu_freq_write_amd;
734                         break;
735                 }
736                 result = -ENODEV;
737                 goto err_unreg;
738         default:
739                 pr_debug("Unknown addr space %d\n",
740                         (u32) (perf->control_register.space_id));
741                 result = -ENODEV;
742                 goto err_unreg;
743         }
744 
745         data->freq_table = kzalloc(sizeof(*data->freq_table) *
746                     (perf->state_count+1), GFP_KERNEL);
747         if (!data->freq_table) {
748                 result = -ENOMEM;
749                 goto err_unreg;
750         }
751 
752         /* detect transition latency */
753         policy->cpuinfo.transition_latency = 0;
754         for (i = 0; i < perf->state_count; i++) {
755                 if ((perf->states[i].transition_latency * 1000) >
756                     policy->cpuinfo.transition_latency)
757                         policy->cpuinfo.transition_latency =
758                             perf->states[i].transition_latency * 1000;
759         }
760 
761         /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
762         if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
763             policy->cpuinfo.transition_latency > 20 * 1000) {
764                 policy->cpuinfo.transition_latency = 20 * 1000;
765                 printk_once(KERN_INFO
766                             "P-state transition latency capped at 20 uS\n");
767         }
768 
769         /* table init */
770         for (i = 0; i < perf->state_count; i++) {
771                 if (i > 0 && perf->states[i].core_frequency >=
772                     data->freq_table[valid_states-1].frequency / 1000)
773                         continue;
774 
775                 data->freq_table[valid_states].driver_data = i;
776                 data->freq_table[valid_states].frequency =
777                     perf->states[i].core_frequency * 1000;
778                 valid_states++;
779         }
780         data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
781         perf->state = 0;
782 
783         result = cpufreq_table_validate_and_show(policy, data->freq_table);
784         if (result)
785                 goto err_freqfree;
786 
787         if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
788                 printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
789 
790         switch (perf->control_register.space_id) {
791         case ACPI_ADR_SPACE_SYSTEM_IO:
792                 /*
793                  * The core will not set policy->cur, because
794                  * cpufreq_driver->get is NULL, so we need to set it here.
795                  * However, we have to guess it, because the current speed is
796                  * unknown and not detectable via IO ports.
797                  */
798                 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
799                 break;
800         case ACPI_ADR_SPACE_FIXED_HARDWARE:
801                 acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
802                 break;
803         default:
804                 break;
805         }
806 
807         /* notify BIOS that we exist */
808         acpi_processor_notify_smm(THIS_MODULE);
809 
810         pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
811         for (i = 0; i < perf->state_count; i++)
812                 pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
813                         (i == perf->state ? '*' : ' '), i,
814                         (u32) perf->states[i].core_frequency,
815                         (u32) perf->states[i].power,
816                         (u32) perf->states[i].transition_latency);
817 
818         /*
819          * the first call to ->target() should result in us actually
820          * writing something to the appropriate registers.
821          */
822         data->resume = 1;
823 
824         return result;
825 
826 err_freqfree:
827         kfree(data->freq_table);
828 err_unreg:
829         acpi_processor_unregister_performance(cpu);
830 err_free_mask:
831         free_cpumask_var(data->freqdomain_cpus);
832 err_free:
833         kfree(data);
834         policy->driver_data = NULL;
835 
836         return result;
837 }
838 
839 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
840 {
841         struct acpi_cpufreq_data *data = policy->driver_data;
842 
843         pr_debug("acpi_cpufreq_cpu_exit\n");
844 
845         if (data) {
846                 policy->driver_data = NULL;
847                 acpi_processor_unregister_performance(data->acpi_perf_cpu);
848                 free_cpumask_var(data->freqdomain_cpus);
849                 kfree(data->freq_table);
850                 kfree(data);
851         }
852 
853         return 0;
854 }
855 
856 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
857 {
858         struct acpi_cpufreq_data *data = policy->driver_data;
859 
860         pr_debug("acpi_cpufreq_resume\n");
861 
862         data->resume = 1;
863 
864         return 0;
865 }
866 
867 static struct freq_attr *acpi_cpufreq_attr[] = {
868         &cpufreq_freq_attr_scaling_available_freqs,
869         &freqdomain_cpus,
870 #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
871         &cpb,
872 #endif
873         NULL,
874 };
875 
876 static struct cpufreq_driver acpi_cpufreq_driver = {
877         .verify         = cpufreq_generic_frequency_table_verify,
878         .target_index   = acpi_cpufreq_target,
879         .bios_limit     = acpi_processor_get_bios_limit,
880         .init           = acpi_cpufreq_cpu_init,
881         .exit           = acpi_cpufreq_cpu_exit,
882         .resume         = acpi_cpufreq_resume,
883         .name           = "acpi-cpufreq",
884         .attr           = acpi_cpufreq_attr,
885 };
886 
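     /*
      * If the CPU advertises boost support (AMD CPB or Intel IDA), hook up
      * the boost interface, force the boost-disable MSRs to a consistent
      * state across all online CPUs and register a hotplug notifier to keep
      * them in sync.
      */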
887 static void __init acpi_cpufreq_boost_init(void)
888 {
889         if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
890                 msrs = msrs_alloc();
891 
892                 if (!msrs)
893                         return;
894 
895                 acpi_cpufreq_driver.set_boost = set_boost;
896                 acpi_cpufreq_driver.boost_enabled = boost_state(0);
897 
898                 cpu_notifier_register_begin();
899 
900                 /* Force all MSRs to the same value */
901                 boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
902                                cpu_online_mask);
903 
904                 __register_cpu_notifier(&boost_nb);
905 
906                 cpu_notifier_register_done();
907         }
908 }
909 
910 static void acpi_cpufreq_boost_exit(void)
911 {
912         if (msrs) {
913                 unregister_cpu_notifier(&boost_nb);
914 
915                 msrs_free(msrs);
916                 msrs = NULL;
917         }
918 }
919 
920 static int __init acpi_cpufreq_init(void)
921 {
922         int ret;
923 
924         if (acpi_disabled)
925                 return -ENODEV;
926 
927         /* don't keep reloading if cpufreq_driver exists */
928         if (cpufreq_get_current_driver())
929                 return -EEXIST;
930 
931         pr_debug("acpi_cpufreq_init\n");
932 
933         ret = acpi_cpufreq_early_init();
934         if (ret)
935                 return ret;
936 
937 #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
 938         /* This is a sysfs file with a strange name and even stranger
 939          * semantics - per-CPU instantiation, but system-wide effect.
 940          * Let's enable it only on AMD CPUs for compatibility reasons and
 941          * only if configured. This is considered legacy code, which
 942          * will probably be removed at some point in the future.
 943          */
944         if (!check_amd_hwpstate_cpu(0)) {
945                 struct freq_attr **attr;
946 
947                 pr_debug("CPB unsupported, do not expose it\n");
948 
949                 for (attr = acpi_cpufreq_attr; *attr; attr++)
950                         if (*attr == &cpb) {
951                                 *attr = NULL;
952                                 break;
953                         }
954         }
955 #endif
956         acpi_cpufreq_boost_init();
957 
958         ret = cpufreq_register_driver(&acpi_cpufreq_driver);
959         if (ret) {
960                 free_acpi_perf_data();
961                 acpi_cpufreq_boost_exit();
962         }
963         return ret;
964 }
965 
966 static void __exit acpi_cpufreq_exit(void)
967 {
968         pr_debug("acpi_cpufreq_exit\n");
969 
970         acpi_cpufreq_boost_exit();
971 
972         cpufreq_unregister_driver(&acpi_cpufreq_driver);
973 
974         free_acpi_perf_data();
975 }
976 
977 module_param(acpi_pstate_strict, uint, 0644);
978 MODULE_PARM_DESC(acpi_pstate_strict,
979         "value 0 or non-zero. non-zero -> strict ACPI checks are "
980         "performed during frequency changes.");
981 
982 late_initcall(acpi_cpufreq_init);
983 module_exit(acpi_cpufreq_exit);
984 
985 static const struct x86_cpu_id acpi_cpufreq_ids[] = {
986         X86_FEATURE_MATCH(X86_FEATURE_ACPI),
987         X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
988         {}
989 };
990 MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
991 
992 static const struct acpi_device_id processor_device_ids[] = {
993         {ACPI_PROCESSOR_OBJECT_HID, },
994         {ACPI_PROCESSOR_DEVICE_HID, },
995         {},
996 };
997 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
998 
999 MODULE_ALIAS("acpi");
1000 
