Version:  2.0.40 2.2.26 2.4.37 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2 4.3 4.4 4.5 4.6 4.7 4.8 4.9 4.10

Linux/tools/perf/util/header.c

  1 #include "util.h"
  2 #include <sys/types.h>
  3 #include <byteswap.h>
  4 #include <unistd.h>
  5 #include <stdio.h>
  6 #include <stdlib.h>
  7 #include <linux/list.h>
  8 #include <linux/kernel.h>
  9 #include <linux/bitops.h>
 10 #include <sys/utsname.h>
 11 
 12 #include "evlist.h"
 13 #include "evsel.h"
 14 #include "header.h"
 15 #include "../perf.h"
 16 #include "trace-event.h"
 17 #include "session.h"
 18 #include "symbol.h"
 19 #include "debug.h"
 20 #include "cpumap.h"
 21 #include "pmu.h"
 22 #include "vdso.h"
 23 #include "strbuf.h"
 24 #include "build-id.h"
 25 #include "data.h"
 26 #include <api/fs/fs.h>
 27 #include "asm/bug.h"
 28 
 29 /*
 30  * magic2 = "PERFILE2"
 31  * must be a numerical value to let the endianness
 32  * determine the memory layout. That way we are able
 33  * to detect endianness when reading the perf.data file
 34  * back.
 35  *
 36  * we check for legacy (PERFFILE) format.
 37  */
/* Legacy (pre-"PERFILE2") magic, string-compared on read. */
static const char *__perf_magic1 = "PERFFILE";
/*
 * "PERFILE2" encoded as a u64, plus its byte-swapped form so the
 * reader can detect a file recorded on the other endianness.
 */
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC      __perf_magic2

/* On-disk record: an event attr followed by the section holding its ids. */
struct perf_file_attr {
        struct perf_event_attr  attr;
        struct perf_file_section        ids;
};
 48 
/* Mark feature @feat as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
        set_bit(feat, header->adds_features);
}
 53 
/* Remove feature @feat from the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
        clear_bit(feat, header->adds_features);
}
 58 
/* Return true if feature @feat is set in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
        return test_bit(feat, header->adds_features);
}
 63 
 64 static int do_write(int fd, const void *buf, size_t size)
 65 {
 66         while (size) {
 67                 int ret = write(fd, buf, size);
 68 
 69                 if (ret < 0)
 70                         return -errno;
 71 
 72                 size -= ret;
 73                 buf += ret;
 74         }
 75 
 76         return 0;
 77 }
 78 
 79 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
 80 {
 81         static const char zero_buf[NAME_ALIGN];
 82         int err = do_write(fd, bf, count);
 83 
 84         if (!err)
 85                 err = do_write(fd, zero_buf, count_aligned - count);
 86 
 87         return err;
 88 }
 89 
/* On-disk footprint of a string: u32 length prefix + NUL-padded payload. */
#define string_size(str)                                                \
        (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
 92 
 93 static int do_write_string(int fd, const char *str)
 94 {
 95         u32 len, olen;
 96         int ret;
 97 
 98         olen = strlen(str) + 1;
 99         len = PERF_ALIGN(olen, NAME_ALIGN);
100 
101         /* write len, incl. \0 */
102         ret = do_write(fd, &len, sizeof(len));
103         if (ret < 0)
104                 return ret;
105 
106         return write_padded(fd, str, olen, len);
107 }
108 
109 static char *do_read_string(int fd, struct perf_header *ph)
110 {
111         ssize_t sz, ret;
112         u32 len;
113         char *buf;
114 
115         sz = readn(fd, &len, sizeof(len));
116         if (sz < (ssize_t)sizeof(len))
117                 return NULL;
118 
119         if (ph->needs_swap)
120                 len = bswap_32(len);
121 
122         buf = malloc(len);
123         if (!buf)
124                 return NULL;
125 
126         ret = readn(fd, buf, len);
127         if (ret == (ssize_t)len) {
128                 /*
129                  * strings are padded by zeroes
130                  * thus the actual strlen of buf
131                  * may be less than len
132                  */
133                 return buf;
134         }
135 
136         free(buf);
137         return NULL;
138 }
139 
/* HEADER_TRACING_DATA: dump ftrace event metadata for the evlist to @fd. */
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
                            struct perf_evlist *evlist)
{
        return read_tracing_data(fd, &evlist->entries);
}
145 
146 
147 static int write_build_id(int fd, struct perf_header *h,
148                           struct perf_evlist *evlist __maybe_unused)
149 {
150         struct perf_session *session;
151         int err;
152 
153         session = container_of(h, struct perf_session, header);
154 
155         if (!perf_session__read_build_ids(session, true))
156                 return -1;
157 
158         err = perf_session__write_buildid_table(session, fd);
159         if (err < 0) {
160                 pr_debug("failed to write buildid table\n");
161                 return err;
162         }
163         perf_session__cache_build_ids(session);
164 
165         return 0;
166 }
167 
168 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
169                           struct perf_evlist *evlist __maybe_unused)
170 {
171         struct utsname uts;
172         int ret;
173 
174         ret = uname(&uts);
175         if (ret < 0)
176                 return -1;
177 
178         return do_write_string(fd, uts.nodename);
179 }
180 
181 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
182                            struct perf_evlist *evlist __maybe_unused)
183 {
184         struct utsname uts;
185         int ret;
186 
187         ret = uname(&uts);
188         if (ret < 0)
189                 return -1;
190 
191         return do_write_string(fd, uts.release);
192 }
193 
194 static int write_arch(int fd, struct perf_header *h __maybe_unused,
195                       struct perf_evlist *evlist __maybe_unused)
196 {
197         struct utsname uts;
198         int ret;
199 
200         ret = uname(&uts);
201         if (ret < 0)
202                 return -1;
203 
204         return do_write_string(fd, uts.machine);
205 }
206 
/* HEADER_VERSION: record the perf tool version string. */
static int write_version(int fd, struct perf_header *h __maybe_unused,
                         struct perf_evlist *evlist __maybe_unused)
{
        return do_write_string(fd, perf_version_string);
}
212 
/*
 * Find the first /proc/cpuinfo line starting with @cpuinfo_proc, strip
 * the "field\t: " prefix and trailing newline, squash repeated
 * whitespace, and write the value with do_write_string().
 * Returns the write result, or -1 if the field was not found.
 */
static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
        FILE *file;
        char *buf = NULL;
        char *s, *p;
        const char *search = cpuinfo_proc;
        size_t len = 0;
        int ret = -1;

        if (!search)
                return -1;

        file = fopen("/proc/cpuinfo", "r");
        if (!file)
                return -1;

        while (getline(&buf, &len, file) > 0) {
                ret = strncmp(buf, search, strlen(search));
                if (!ret)
                        break;
        }

        if (ret) {
                ret = -1;
                goto done;
        }

        s = buf;

        p = strchr(buf, ':');
        if (p && *(p+1) == ' ' && *(p+2))
                s = p + 2;
        p = strchr(s, '\n');
        if (p)
                *p = '\0';

        /* squash extra space characters (branding string) */
        p = s;
        while (*p) {
                /*
                 * cast to unsigned char: passing a plain char to
                 * isspace() is undefined for negative values
                 */
                if (isspace((unsigned char)*p)) {
                        char *r = p + 1;
                        char *q = r;
                        *p = ' ';
                        while (*q && isspace((unsigned char)*q))
                                q++;
                        if (q != (p+1))
                                while ((*r++ = *q++));
                }
                p++;
        }
        ret = do_write_string(fd, s);
done:
        free(buf);
        fclose(file);
        return ret;
}
269 
270 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
271                        struct perf_evlist *evlist __maybe_unused)
272 {
273 #ifndef CPUINFO_PROC
274 #define CPUINFO_PROC {"model name", }
275 #endif
276         const char *cpuinfo_procs[] = CPUINFO_PROC;
277         unsigned int i;
278 
279         for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
280                 int ret;
281                 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
282                 if (ret >= 0)
283                         return ret;
284         }
285         return -1;
286 }
287 
288 
289 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
290                         struct perf_evlist *evlist __maybe_unused)
291 {
292         long nr;
293         u32 nrc, nra;
294         int ret;
295 
296         nr = sysconf(_SC_NPROCESSORS_CONF);
297         if (nr < 0)
298                 return -1;
299 
300         nrc = (u32)(nr & UINT_MAX);
301 
302         nr = sysconf(_SC_NPROCESSORS_ONLN);
303         if (nr < 0)
304                 return -1;
305 
306         nra = (u32)(nr & UINT_MAX);
307 
308         ret = do_write(fd, &nrc, sizeof(nrc));
309         if (ret < 0)
310                 return ret;
311 
312         return do_write(fd, &nra, sizeof(nra));
313 }
314 
/*
 * HEADER_EVENT_DESC.  On-disk layout (the write order below IS the
 * file format — do not reorder):
 *   u32 nr_events
 *   u32 sizeof(perf_event_attr)
 *   per event: attr struct, u32 nr_ids, string name, u64 ids[nr_ids]
 */
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
                            struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        u32 nre, nri, sz;
        int ret;

        nre = evlist->nr_entries;

        /*
         * write number of events
         */
        ret = do_write(fd, &nre, sizeof(nre));
        if (ret < 0)
                return ret;

        /*
         * size of perf_event_attr struct
         */
        sz = (u32)sizeof(evsel->attr);
        ret = do_write(fd, &sz, sizeof(sz));
        if (ret < 0)
                return ret;

        evlist__for_each_entry(evlist, evsel) {
                ret = do_write(fd, &evsel->attr, sz);
                if (ret < 0)
                        return ret;
                /*
                 * write number of unique id per event
                 * there is one id per instance of an event
                 *
                 * copy into an nri to be independent of the
                 * type of ids,
                 */
                nri = evsel->ids;
                ret = do_write(fd, &nri, sizeof(nri));
                if (ret < 0)
                        return ret;

                /*
                 * write event string as passed on cmdline
                 */
                ret = do_write_string(fd, perf_evsel__name(evsel));
                if (ret < 0)
                        return ret;
                /*
                 * write unique ids for this event
                 */
                ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
                if (ret < 0)
                        return ret;
        }
        return 0;
}
370 
371 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
372                          struct perf_evlist *evlist __maybe_unused)
373 {
374         char buf[MAXPATHLEN];
375         char proc[32];
376         u32 n;
377         int i, ret;
378 
379         /*
380          * actual atual path to perf binary
381          */
382         sprintf(proc, "/proc/%d/exe", getpid());
383         ret = readlink(proc, buf, sizeof(buf));
384         if (ret <= 0)
385                 return -1;
386 
387         /* readlink() does not add null termination */
388         buf[ret] = '\0';
389 
390         /* account for binary path */
391         n = perf_env.nr_cmdline + 1;
392 
393         ret = do_write(fd, &n, sizeof(n));
394         if (ret < 0)
395                 return ret;
396 
397         ret = do_write_string(fd, buf);
398         if (ret < 0)
399                 return ret;
400 
401         for (i = 0 ; i < perf_env.nr_cmdline; i++) {
402                 ret = do_write_string(fd, perf_env.cmdline_argv[i]);
403                 if (ret < 0)
404                         return ret;
405         }
406         return 0;
407 }
408 
/* sysfs locations of the per-cpu core/thread sibling lists */
#define CORE_SIB_FMT \
        "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
        "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/*
 * Deduplicated CPU topology: core_siblings/thread_siblings each hold
 * one heap-allocated string per distinct sibling list observed across
 * the cpu_nr cpus (core_sib/thread_sib are the used counts).
 */
struct cpu_topo {
        u32 cpu_nr;
        u32 core_sib;
        u32 thread_sib;
        char **core_siblings;
        char **thread_siblings;
};
421 
/*
 * Read @cpu's core and thread sibling lists from sysfs and record each
 * distinct string in @tp.  When a string is new, ownership of the
 * getline() buffer transfers to @tp (buf is reset to NULL/len to 0 so
 * it is neither freed here nor reused).  Returns 0 if either section
 * was handled, -1 if both failed.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
        FILE *fp;
        char filename[MAXPATHLEN];
        char *buf = NULL, *p;
        size_t len = 0;
        ssize_t sret;
        u32 i = 0;
        int ret = -1;

        sprintf(filename, CORE_SIB_FMT, cpu);
        fp = fopen(filename, "r");
        if (!fp)
                goto try_threads;

        sret = getline(&buf, &len, fp);
        fclose(fp);
        if (sret <= 0)
                goto try_threads;

        p = strchr(buf, '\n');
        if (p)
                *p = '\0';

        /* dedup: store the list only if no earlier cpu produced it */
        for (i = 0; i < tp->core_sib; i++) {
                if (!strcmp(buf, tp->core_siblings[i]))
                        break;
        }
        if (i == tp->core_sib) {
                tp->core_siblings[i] = buf;
                tp->core_sib++;
                buf = NULL;     /* ownership moved into tp */
                len = 0;
        }
        ret = 0;

try_threads:
        sprintf(filename, THRD_SIB_FMT, cpu);
        fp = fopen(filename, "r");
        if (!fp)
                goto done;

        if (getline(&buf, &len, fp) <= 0)
                goto done;

        p = strchr(buf, '\n');
        if (p)
                *p = '\0';

        /* same dedup for the thread sibling list */
        for (i = 0; i < tp->thread_sib; i++) {
                if (!strcmp(buf, tp->thread_siblings[i]))
                        break;
        }
        if (i == tp->thread_sib) {
                tp->thread_siblings[i] = buf;
                tp->thread_sib++;
                buf = NULL;     /* ownership moved into tp */
        }
        ret = 0;
done:
        if(fp)
                fclose(fp);
        free(buf);
        return ret;
}
487 
488 static void free_cpu_topo(struct cpu_topo *tp)
489 {
490         u32 i;
491 
492         if (!tp)
493                 return;
494 
495         for (i = 0 ; i < tp->core_sib; i++)
496                 zfree(&tp->core_siblings[i]);
497 
498         for (i = 0 ; i < tp->thread_sib; i++)
499                 zfree(&tp->thread_siblings[i]);
500 
501         free(tp);
502 }
503 
504 static struct cpu_topo *build_cpu_topology(void)
505 {
506         struct cpu_topo *tp;
507         void *addr;
508         u32 nr, i;
509         size_t sz;
510         long ncpus;
511         int ret = -1;
512 
513         ncpus = sysconf(_SC_NPROCESSORS_CONF);
514         if (ncpus < 0)
515                 return NULL;
516 
517         nr = (u32)(ncpus & UINT_MAX);
518 
519         sz = nr * sizeof(char *);
520 
521         addr = calloc(1, sizeof(*tp) + 2 * sz);
522         if (!addr)
523                 return NULL;
524 
525         tp = addr;
526         tp->cpu_nr = nr;
527         addr += sizeof(*tp);
528         tp->core_siblings = addr;
529         addr += sz;
530         tp->thread_siblings = addr;
531 
532         for (i = 0; i < nr; i++) {
533                 ret = build_cpu_topo(tp, i);
534                 if (ret < 0)
535                         break;
536         }
537         if (ret) {
538                 free_cpu_topo(tp);
539                 tp = NULL;
540         }
541         return tp;
542 }
543 
544 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
545                           struct perf_evlist *evlist __maybe_unused)
546 {
547         struct cpu_topo *tp;
548         u32 i;
549         int ret, j;
550 
551         tp = build_cpu_topology();
552         if (!tp)
553                 return -1;
554 
555         ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
556         if (ret < 0)
557                 goto done;
558 
559         for (i = 0; i < tp->core_sib; i++) {
560                 ret = do_write_string(fd, tp->core_siblings[i]);
561                 if (ret < 0)
562                         goto done;
563         }
564         ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
565         if (ret < 0)
566                 goto done;
567 
568         for (i = 0; i < tp->thread_sib; i++) {
569                 ret = do_write_string(fd, tp->thread_siblings[i]);
570                 if (ret < 0)
571                         break;
572         }
573 
574         ret = perf_env__read_cpu_topology_map(&perf_env);
575         if (ret < 0)
576                 goto done;
577 
578         for (j = 0; j < perf_env.nr_cpus_avail; j++) {
579                 ret = do_write(fd, &perf_env.cpu[j].core_id,
580                                sizeof(perf_env.cpu[j].core_id));
581                 if (ret < 0)
582                         return ret;
583                 ret = do_write(fd, &perf_env.cpu[j].socket_id,
584                                sizeof(perf_env.cpu[j].socket_id));
585                 if (ret < 0)
586                         return ret;
587         }
588 done:
589         free_cpu_topo(tp);
590         return ret;
591 }
592 
593 
594 
595 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
596                           struct perf_evlist *evlist __maybe_unused)
597 {
598         char *buf = NULL;
599         FILE *fp;
600         size_t len = 0;
601         int ret = -1, n;
602         uint64_t mem;
603 
604         fp = fopen("/proc/meminfo", "r");
605         if (!fp)
606                 return -1;
607 
608         while (getline(&buf, &len, fp) > 0) {
609                 ret = strncmp(buf, "MemTotal:", 9);
610                 if (!ret)
611                         break;
612         }
613         if (!ret) {
614                 n = sscanf(buf, "%*s %"PRIu64, &mem);
615                 if (n == 1)
616                         ret = do_write(fd, &mem, sizeof(mem));
617         } else
618                 ret = -1;
619         free(buf);
620         fclose(fp);
621         return ret;
622 }
623 
624 static int write_topo_node(int fd, int node)
625 {
626         char str[MAXPATHLEN];
627         char field[32];
628         char *buf = NULL, *p;
629         size_t len = 0;
630         FILE *fp;
631         u64 mem_total, mem_free, mem;
632         int ret = -1;
633 
634         sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
635         fp = fopen(str, "r");
636         if (!fp)
637                 return -1;
638 
639         while (getline(&buf, &len, fp) > 0) {
640                 /* skip over invalid lines */
641                 if (!strchr(buf, ':'))
642                         continue;
643                 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
644                         goto done;
645                 if (!strcmp(field, "MemTotal:"))
646                         mem_total = mem;
647                 if (!strcmp(field, "MemFree:"))
648                         mem_free = mem;
649         }
650 
651         fclose(fp);
652         fp = NULL;
653 
654         ret = do_write(fd, &mem_total, sizeof(u64));
655         if (ret)
656                 goto done;
657 
658         ret = do_write(fd, &mem_free, sizeof(u64));
659         if (ret)
660                 goto done;
661 
662         ret = -1;
663         sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
664 
665         fp = fopen(str, "r");
666         if (!fp)
667                 goto done;
668 
669         if (getline(&buf, &len, fp) <= 0)
670                 goto done;
671 
672         p = strchr(buf, '\n');
673         if (p)
674                 *p = '\0';
675 
676         ret = do_write_string(fd, buf);
677 done:
678         free(buf);
679         if (fp)
680                 fclose(fp);
681         return ret;
682 }
683 
684 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
685                           struct perf_evlist *evlist __maybe_unused)
686 {
687         char *buf = NULL;
688         size_t len = 0;
689         FILE *fp;
690         struct cpu_map *node_map = NULL;
691         char *c;
692         u32 nr, i, j;
693         int ret = -1;
694 
695         fp = fopen("/sys/devices/system/node/online", "r");
696         if (!fp)
697                 return -1;
698 
699         if (getline(&buf, &len, fp) <= 0)
700                 goto done;
701 
702         c = strchr(buf, '\n');
703         if (c)
704                 *c = '\0';
705 
706         node_map = cpu_map__new(buf);
707         if (!node_map)
708                 goto done;
709 
710         nr = (u32)node_map->nr;
711 
712         ret = do_write(fd, &nr, sizeof(nr));
713         if (ret < 0)
714                 goto done;
715 
716         for (i = 0; i < nr; i++) {
717                 j = (u32)node_map->map[i];
718                 ret = do_write(fd, &j, sizeof(j));
719                 if (ret < 0)
720                         break;
721 
722                 ret = write_topo_node(fd, i);
723                 if (ret < 0)
724                         break;
725         }
726 done:
727         free(buf);
728         fclose(fp);
729         cpu_map__put(node_map);
730         return ret;
731 }
732 
733 /*
734  * File format:
735  *
736  * struct pmu_mappings {
737  *      u32     pmu_num;
738  *      struct pmu_map {
739  *              u32     type;
740  *              char    name[];
741  *      }[pmu_num];
742  * };
743  */
744 
/*
 * HEADER_PMU_MAPPINGS writer.  The PMU count is not known up front, so
 * a placeholder u32 is written first and patched in place with pwrite()
 * once all named PMUs have been emitted — this requires @fd to be
 * seekable.  Returns 0 on success, negative on failure.
 */
static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
                              struct perf_evlist *evlist __maybe_unused)
{
        struct perf_pmu *pmu = NULL;
        off_t offset = lseek(fd, 0, SEEK_CUR);  /* where pmu_num lives */
        __u32 pmu_num = 0;
        int ret;

        /* write real pmu_num later */
        ret = do_write(fd, &pmu_num, sizeof(pmu_num));
        if (ret < 0)
                return ret;

        while ((pmu = perf_pmu__scan(pmu))) {
                /* anonymous PMUs are not recorded */
                if (!pmu->name)
                        continue;
                pmu_num++;

                ret = do_write(fd, &pmu->type, sizeof(pmu->type));
                if (ret < 0)
                        return ret;

                ret = do_write_string(fd, pmu->name);
                if (ret < 0)
                        return ret;
        }

        /* back-patch the real count over the placeholder */
        if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
                /* discard all */
                lseek(fd, offset, SEEK_SET);
                return -1;
        }

        return 0;
}
780 
781 /*
782  * File format:
783  *
784  * struct group_descs {
785  *      u32     nr_groups;
786  *      struct group_desc {
787  *              char    name[];
788  *              u32     leader_idx;
789  *              u32     nr_members;
790  *      }[nr_groups];
791  * };
792  */
793 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
794                             struct perf_evlist *evlist)
795 {
796         u32 nr_groups = evlist->nr_groups;
797         struct perf_evsel *evsel;
798         int ret;
799 
800         ret = do_write(fd, &nr_groups, sizeof(nr_groups));
801         if (ret < 0)
802                 return ret;
803 
804         evlist__for_each_entry(evlist, evsel) {
805                 if (perf_evsel__is_group_leader(evsel) &&
806                     evsel->nr_members > 1) {
807                         const char *name = evsel->group_name ?: "{anon_group}";
808                         u32 leader_idx = evsel->idx;
809                         u32 nr_members = evsel->nr_members;
810 
811                         ret = do_write_string(fd, name);
812                         if (ret < 0)
813                                 return ret;
814 
815                         ret = do_write(fd, &leader_idx, sizeof(leader_idx));
816                         if (ret < 0)
817                                 return ret;
818 
819                         ret = do_write(fd, &nr_members, sizeof(nr_members));
820                         if (ret < 0)
821                                 return ret;
822                 }
823         }
824         return 0;
825 }
826 
827 /*
828  * default get_cpuid(): nothing gets recorded
829  * actual implementation must be in arch/$(ARCH)/util/header.c
830  */
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
        return -1;
}
835 
836 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
837                        struct perf_evlist *evlist __maybe_unused)
838 {
839         char buffer[64];
840         int ret;
841 
842         ret = get_cpuid(buffer, sizeof(buffer));
843         if (!ret)
844                 goto write_it;
845 
846         return -1;
847 write_it:
848         return do_write_string(fd, buffer);
849 }
850 
/* HEADER_BRANCH_STACK is a flag-only feature: no payload to write. */
static int write_branch_stack(int fd __maybe_unused,
                              struct perf_header *h __maybe_unused,
                       struct perf_evlist *evlist __maybe_unused)
{
        return 0;
}
857 
858 static int write_auxtrace(int fd, struct perf_header *h,
859                           struct perf_evlist *evlist __maybe_unused)
860 {
861         struct perf_session *session;
862         int err;
863 
864         session = container_of(h, struct perf_session, header);
865 
866         err = auxtrace_index__write(fd, &session->auxtrace_index);
867         if (err < 0)
868                 pr_err("Failed to write auxtrace index\n");
869         return err;
870 }
871 
872 static int cpu_cache_level__sort(const void *a, const void *b)
873 {
874         struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
875         struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
876 
877         return cache_a->level - cache_b->level;
878 }
879 
880 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
881 {
882         if (a->level != b->level)
883                 return false;
884 
885         if (a->line_size != b->line_size)
886                 return false;
887 
888         if (a->sets != b->sets)
889                 return false;
890 
891         if (a->ways != b->ways)
892                 return false;
893 
894         if (strcmp(a->type, b->type))
895                 return false;
896 
897         if (strcmp(a->size, b->size))
898                 return false;
899 
900         if (strcmp(a->map, b->map))
901                 return false;
902 
903         return true;
904 }
905 
906 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
907 {
908         char path[PATH_MAX], file[PATH_MAX];
909         struct stat st;
910         size_t len;
911 
912         scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
913         scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
914 
915         if (stat(file, &st))
916                 return 1;
917 
918         scnprintf(file, PATH_MAX, "%s/level", path);
919         if (sysfs__read_int(file, (int *) &cache->level))
920                 return -1;
921 
922         scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
923         if (sysfs__read_int(file, (int *) &cache->line_size))
924                 return -1;
925 
926         scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
927         if (sysfs__read_int(file, (int *) &cache->sets))
928                 return -1;
929 
930         scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
931         if (sysfs__read_int(file, (int *) &cache->ways))
932                 return -1;
933 
934         scnprintf(file, PATH_MAX, "%s/type", path);
935         if (sysfs__read_str(file, &cache->type, &len))
936                 return -1;
937 
938         cache->type[len] = 0;
939         cache->type = rtrim(cache->type);
940 
941         scnprintf(file, PATH_MAX, "%s/size", path);
942         if (sysfs__read_str(file, &cache->size, &len)) {
943                 free(cache->type);
944                 return -1;
945         }
946 
947         cache->size[len] = 0;
948         cache->size = rtrim(cache->size);
949 
950         scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
951         if (sysfs__read_str(file, &cache->map, &len)) {
952                 free(cache->map);
953                 free(cache->type);
954                 return -1;
955         }
956 
957         cache->map[len] = 0;
958         cache->map = rtrim(cache->map);
959         return 0;
960 }
961 
/* Print one cache entry as e.g. "L1 Data  32K [0-1]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
        fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
966 
/*
 * Probe sysfs cache levels (index0..index9) for every configured cpu,
 * deduplicating identical entries into @caches (capacity @size).
 * The number of distinct caches is returned via @cntp.
 * Returns 0 on success, negative on a sysfs read failure.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
        u32 i, cnt = 0;
        long ncpus;
        u32 nr, cpu;
        u16 level;

        ncpus = sysconf(_SC_NPROCESSORS_CONF);
        if (ncpus < 0)
                return -1;

        nr = (u32)(ncpus & UINT_MAX);

        for (cpu = 0; cpu < nr; cpu++) {
                /* 10 is an upper bound on sysfs cache index dirs per cpu */
                for (level = 0; level < 10; level++) {
                        struct cpu_cache_level c;
                        int err;

                        err = cpu_cache_level__read(&c, cpu, level);
                        if (err < 0)
                                return err;

                        /* err == 1: no such index dir, stop probing this cpu */
                        if (err == 1)
                                break;

                        /* keep only one copy of identical cache descriptions */
                        for (i = 0; i < cnt; i++) {
                                if (cpu_cache_level__cmp(&c, &caches[i]))
                                        break;
                        }

                        if (i == cnt)
                                caches[cnt++] = c;
                        else
                                cpu_cache_level__free(&c);

                        if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
                                goto out;
                }
        }
 out:
        *cntp = cnt;
        return 0;
}
1010 
1011 #define MAX_CACHES 2000
1012 
/*
 * HEADER_CACHE writer.  On-disk layout:
 *   u32 version (1), u32 count, then per cache:
 *   u32 level, line_size, sets, ways; string type, size, map.
 */
static int write_cache(int fd, struct perf_header *h __maybe_unused,
                          struct perf_evlist *evlist __maybe_unused)
{
        struct cpu_cache_level caches[MAX_CACHES];
        u32 cnt = 0, i, version = 1;
        int ret;

        ret = build_caches(caches, MAX_CACHES, &cnt);
        if (ret)
                goto out;

        /* stable order: ascending cache level */
        qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

        ret = do_write(fd, &version, sizeof(u32));
        if (ret < 0)
                goto out;

        ret = do_write(fd, &cnt, sizeof(u32));
        if (ret < 0)
                goto out;

        for (i = 0; i < cnt; i++) {
                struct cpu_cache_level *c = &caches[i];

                /* write one u32 field, bailing out on error */
                #define _W(v)                                   \
                        ret = do_write(fd, &c->v, sizeof(u32)); \
                        if (ret < 0)                            \
                                goto out;

                _W(level)
                _W(line_size)
                _W(sets)
                _W(ways)
                #undef _W

                /* write one string field, bailing out on error */
                #define _W(v)                                           \
                        ret = do_write_string(fd, (const char *) c->v); \
                        if (ret < 0)                                    \
                                goto out;

                _W(type)
                _W(size)
                _W(map)
                #undef _W
        }

out:
        for (i = 0; i < cnt; i++)
                cpu_cache_level__free(&caches[i]);
        return ret;
}
1064 
/*
 * The stat feature carries no payload: its presence in the feature
 * bitmap alone marks the file as containing stat data.
 */
static int write_stat(int fd __maybe_unused,
                      struct perf_header *h __maybe_unused,
                      struct perf_evlist *evlist __maybe_unused)
{
        return 0;
}
1071 
1072 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
1073                            FILE *fp)
1074 {
1075         fprintf(fp, "# hostname : %s\n", ph->env.hostname);
1076 }
1077 
1078 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
1079                             FILE *fp)
1080 {
1081         fprintf(fp, "# os release : %s\n", ph->env.os_release);
1082 }
1083 
1084 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1085 {
1086         fprintf(fp, "# arch : %s\n", ph->env.arch);
1087 }
1088 
1089 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
1090                           FILE *fp)
1091 {
1092         fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
1093 }
1094 
1095 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
1096                          FILE *fp)
1097 {
1098         fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
1099         fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
1100 }
1101 
1102 static void print_version(struct perf_header *ph, int fd __maybe_unused,
1103                           FILE *fp)
1104 {
1105         fprintf(fp, "# perf version : %s\n", ph->env.version);
1106 }
1107 
1108 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1109                           FILE *fp)
1110 {
1111         int nr, i;
1112 
1113         nr = ph->env.nr_cmdline;
1114 
1115         fprintf(fp, "# cmdline : ");
1116 
1117         for (i = 0; i < nr; i++)
1118                 fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1119         fputc('\n', fp);
1120 }
1121 
1122 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1123                                FILE *fp)
1124 {
1125         int nr, i;
1126         char *str;
1127         int cpu_nr = ph->env.nr_cpus_online;
1128 
1129         nr = ph->env.nr_sibling_cores;
1130         str = ph->env.sibling_cores;
1131 
1132         for (i = 0; i < nr; i++) {
1133                 fprintf(fp, "# sibling cores   : %s\n", str);
1134                 str += strlen(str) + 1;
1135         }
1136 
1137         nr = ph->env.nr_sibling_threads;
1138         str = ph->env.sibling_threads;
1139 
1140         for (i = 0; i < nr; i++) {
1141                 fprintf(fp, "# sibling threads : %s\n", str);
1142                 str += strlen(str) + 1;
1143         }
1144 
1145         if (ph->env.cpu != NULL) {
1146                 for (i = 0; i < cpu_nr; i++)
1147                         fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1148                                 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1149         } else
1150                 fprintf(fp, "# Core ID and Socket ID information is not available\n");
1151 }
1152 
1153 static void free_event_desc(struct perf_evsel *events)
1154 {
1155         struct perf_evsel *evsel;
1156 
1157         if (!events)
1158                 return;
1159 
1160         for (evsel = events; evsel->attr.size; evsel++) {
1161                 zfree(&evsel->name);
1162                 zfree(&evsel->id);
1163         }
1164 
1165         free(events);
1166 }
1167 
/*
 * Parse the event-description feature section into a calloc'ed,
 * attr.size == 0 terminated array of perf_evsel.  Layout read:
 * u32 number of events, u32 on-file attr size, then per event the
 * attr blob, a u32 id count, a string name and the u64 ids.
 * Returns NULL on any read or allocation failure; the caller owns
 * the result and frees it with free_event_desc().
 */
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
        struct perf_evsel *evsel, *events = NULL;
        u64 *id;
        void *buf = NULL;
        u32 nre, sz, nr, i, j;
        ssize_t ret;
        size_t msz;

        /* number of events */
        ret = readn(fd, &nre, sizeof(nre));
        if (ret != (ssize_t)sizeof(nre))
                goto error;

        if (ph->needs_swap)
                nre = bswap_32(nre);

        /* size of one on-file attr struct (may differ from ours) */
        ret = readn(fd, &sz, sizeof(sz));
        if (ret != (ssize_t)sizeof(sz))
                goto error;

        if (ph->needs_swap)
                sz = bswap_32(sz);

        /* buffer to hold on file attr struct */
        buf = malloc(sz);
        if (!buf)
                goto error;

        /* the last event terminates with evsel->attr.size == 0: */
        events = calloc(nre + 1, sizeof(*events));
        if (!events)
                goto error;

        /* copy at most the bytes our in-memory attr can hold */
        msz = sizeof(evsel->attr);
        if (sz < msz)
                msz = sz;

        for (i = 0, evsel = events; i < nre; evsel++, i++) {
                evsel->idx = i;

                /*
                 * must read entire on-file attr struct to
                 * sync up with layout.
                 */
                ret = readn(fd, buf, sz);
                if (ret != (ssize_t)sz)
                        goto error;

                if (ph->needs_swap)
                        perf_event__attr_swap(buf);

                memcpy(&evsel->attr, buf, msz);

                /* number of sample ids attached to this event */
                ret = readn(fd, &nr, sizeof(nr));
                if (ret != (ssize_t)sizeof(nr))
                        goto error;

                if (ph->needs_swap) {
                        nr = bswap_32(nr);
                        evsel->needs_swap = true;
                }

                /* NULL name is tolerated here; only nr == 0 skips the ids */
                evsel->name = do_read_string(fd, ph);

                if (!nr)
                        continue;

                id = calloc(nr, sizeof(*id));
                if (!id)
                        goto error;
                evsel->ids = nr;
                evsel->id = id;

                for (j = 0 ; j < nr; j++) {
                        ret = readn(fd, id, sizeof(*id));
                        if (ret != (ssize_t)sizeof(*id))
                                goto error;
                        if (ph->needs_swap)
                                *id = bswap_64(*id);
                        id++;
                }
        }
out:
        free(buf);
        return events;
error:
        /* free everything allocated so far and report NULL */
        free_event_desc(events);
        events = NULL;
        goto out;
}
1260 
/*
 * Callback used with perf_event_attr__fprintf(): print one attr field
 * as ", name = value".  Returns the number of characters written.
 */
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
                                void *priv __attribute__((unused)))
{
        int written;

        written = fprintf(fp, ", %s = %s", name, val);
        return written;
}
1266 
1267 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
1268 {
1269         struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
1270         u32 j;
1271         u64 *id;
1272 
1273         if (!events) {
1274                 fprintf(fp, "# event desc: not available or unable to read\n");
1275                 return;
1276         }
1277 
1278         for (evsel = events; evsel->attr.size; evsel++) {
1279                 fprintf(fp, "# event : name = %s, ", evsel->name);
1280 
1281                 if (evsel->ids) {
1282                         fprintf(fp, ", id = {");
1283                         for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1284                                 if (j)
1285                                         fputc(',', fp);
1286                                 fprintf(fp, " %"PRIu64, *id);
1287                         }
1288                         fprintf(fp, " }");
1289                 }
1290 
1291                 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);
1292 
1293                 fputc('\n', fp);
1294         }
1295 
1296         free_event_desc(events);
1297 }
1298 
1299 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1300                             FILE *fp)
1301 {
1302         fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1303 }
1304 
1305 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1306                                 FILE *fp)
1307 {
1308         int i;
1309         struct numa_node *n;
1310 
1311         for (i = 0; i < ph->env.nr_numa_nodes; i++) {
1312                 n = &ph->env.numa_nodes[i];
1313 
1314                 fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1315                             " free = %"PRIu64" kB\n",
1316                         n->node, n->mem_total, n->mem_free);
1317 
1318                 fprintf(fp, "# node%u cpu list : ", n->node);
1319                 cpu_map__fprintf(n->map, fp);
1320         }
1321 }
1322 
1323 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
1324 {
1325         fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
1326 }
1327 
1328 static void print_branch_stack(struct perf_header *ph __maybe_unused,
1329                                int fd __maybe_unused, FILE *fp)
1330 {
1331         fprintf(fp, "# contains samples with branch stack\n");
1332 }
1333 
1334 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1335                            int fd __maybe_unused, FILE *fp)
1336 {
1337         fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1338 }
1339 
1340 static void print_stat(struct perf_header *ph __maybe_unused,
1341                        int fd __maybe_unused, FILE *fp)
1342 {
1343         fprintf(fp, "# contains stat data\n");
1344 }
1345 
1346 static void print_cache(struct perf_header *ph __maybe_unused,
1347                         int fd __maybe_unused, FILE *fp __maybe_unused)
1348 {
1349         int i;
1350 
1351         fprintf(fp, "# CPU cache info:\n");
1352         for (i = 0; i < ph->env.caches_cnt; i++) {
1353                 fprintf(fp, "#  ");
1354                 cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1355         }
1356 }
1357 
1358 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1359                                FILE *fp)
1360 {
1361         const char *delimiter = "# pmu mappings: ";
1362         char *str, *tmp;
1363         u32 pmu_num;
1364         u32 type;
1365 
1366         pmu_num = ph->env.nr_pmu_mappings;
1367         if (!pmu_num) {
1368                 fprintf(fp, "# pmu mappings: not available\n");
1369                 return;
1370         }
1371 
1372         str = ph->env.pmu_mappings;
1373 
1374         while (pmu_num) {
1375                 type = strtoul(str, &tmp, 0);
1376                 if (*tmp != ':')
1377                         goto error;
1378 
1379                 str = tmp + 1;
1380                 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1381 
1382                 delimiter = ", ";
1383                 str += strlen(str) + 1;
1384                 pmu_num--;
1385         }
1386 
1387         fprintf(fp, "\n");
1388 
1389         if (!pmu_num)
1390                 return;
1391 error:
1392         fprintf(fp, "# pmu mappings: unable to read\n");
1393 }
1394 
/*
 * Print event groups as "# group: name{leader,member,...}".  Relies on
 * evlist order: members follow their leader, so a countdown (nr) of
 * remaining members is enough state to close the brace.
 */
static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
                             FILE *fp)
{
        struct perf_session *session;
        struct perf_evsel *evsel;
        u32 nr = 0;

        session = container_of(ph, struct perf_session, header);

        evlist__for_each_entry(session->evlist, evsel) {
                if (perf_evsel__is_group_leader(evsel) &&
                    evsel->nr_members > 1) {
                        /* leader of a real group: open the member list */
                        fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
                                perf_evsel__name(evsel));

                        /* members still expected after this leader */
                        nr = evsel->nr_members - 1;
                } else if (nr) {
                        fprintf(fp, ",%s", perf_evsel__name(evsel));

                        if (--nr == 0)
                                fprintf(fp, "}\n");
                }
        }
}
1419 
/*
 * Apply one build-id event: find (or create) the machine for bev->pid,
 * find (or create) the dso for filename and stamp it with the build id.
 * Returns 0 on success, -1 when the machine lookup fails or the cpumode
 * is not one we handle.
 */
static int __event_process_build_id(struct build_id_event *bev,
                                    char *filename,
                                    struct perf_session *session)
{
        int err = -1;
        struct machine *machine;
        u16 cpumode;
        struct dso *dso;
        enum dso_kernel_type dso_type;

        machine = perf_session__findnew_machine(session, bev->pid);
        if (!machine)
                goto out;

        cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        /* map the record's cpumode onto the dso kernel classification */
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                dso_type = DSO_TYPE_KERNEL;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                dso_type = DSO_TYPE_GUEST_KERNEL;
                break;
        case PERF_RECORD_MISC_USER:
        case PERF_RECORD_MISC_GUEST_USER:
                dso_type = DSO_TYPE_USER;
                break;
        default:
                goto out;
        }

        dso = machine__findnew_dso(machine, filename);
        if (dso != NULL) {
                char sbuild_id[SBUILD_ID_SIZE];

                dso__set_build_id(dso, &bev->build_id);

                /* kernel modules keep their default dso->kernel setting */
                if (!is_kernel_module(filename, cpumode))
                        dso->kernel = dso_type;

                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
                pr_debug("build id event received for %s: %s\n",
                         dso->long_name, sbuild_id);
                /* drop the reference taken by machine__findnew_dso() */
                dso__put(dso);
        }

        err = 0;
out:
        return err;
}
1471 
/*
 * Read build-id records written in the pre-a1645ce1 layout, which
 * lacked the pid field.  Each old record is converted to the current
 * struct build_id_event, with the pid reconstructed from header.misc,
 * and fed to __event_process_build_id().
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
                                                 int input, u64 offset, u64 size)
{
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct {
                struct perf_event_header   header;
                u8                         build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
                char                       filename[0];
        } old_bev;
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size;

        while (offset < limit) {
                ssize_t len;

                if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
                        return -1;

                if (header->needs_swap)
                        perf_event_header__bswap(&old_bev.header);

                /*
                 * NOTE(review): a header.size smaller than sizeof(old_bev)
                 * would make len negative here — assumes well-formed input;
                 * confirm whether hardening is needed.
                 */
                len = old_bev.header.size - sizeof(old_bev);
                if (readn(input, filename, len) != len)
                        return -1;

                bev.header = old_bev.header;

                /*
                 * As the pid is the missing value, we need to fill
                 * it properly. The header.misc value give us nice hint.
                 */
                bev.pid = HOST_KERNEL_ID;
                if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
                    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
                        bev.pid = DEFAULT_GUEST_KERNEL_ID;

                memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;
        }

        return 0;
}
1517 
/*
 * Read the build-id table in [offset, offset + size) and apply each
 * record via __event_process_build_id().  Falls back to the old,
 * pid-less record layout when the characteristic truncation of the
 * '[kernel.kallsyms]' name is detected (see comment below).
 * Returns 0 on success, -1 on a short read.
 */
static int perf_header__read_build_ids(struct perf_header *header,
                                       int input, u64 offset, u64 size)
{
        struct perf_session *session = container_of(header, struct perf_session, header);
        struct build_id_event bev;
        char filename[PATH_MAX];
        u64 limit = offset + size, orig_offset = offset;
        int err = -1;

        while (offset < limit) {
                ssize_t len;

                if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
                        goto out;

                if (header->needs_swap)
                        perf_event_header__bswap(&bev.header);

                /* filename occupies the rest of the record */
                len = bev.header.size - sizeof(bev);
                if (readn(input, filename, len) != len)
                        goto out;
                /*
                 * The a1645ce1 changeset:
                 *
                 * "perf: 'perf kvm' tool for monitoring guest performance from host"
                 *
                 * Added a field to struct build_id_event that broke the file
                 * format.
                 *
                 * Since the kernel build-id is the first entry, process the
                 * table using the old format if the well known
                 * '[kernel.kallsyms]' string for the kernel build-id has the
                 * first 4 characters chopped off (where the pid_t sits).
                 */
                if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
                        /* rewind and re-read the whole table in the old layout */
                        if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
                                return -1;
                        return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
                }

                __event_process_build_id(&bev, filename, session);

                offset += bev.header.size;
        }
        err = 0;
out:
        return err;
}
1566 
1567 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1568                                 struct perf_header *ph __maybe_unused,
1569                                 int fd, void *data)
1570 {
1571         ssize_t ret = trace_report(fd, data, false);
1572         return ret < 0 ? -1 : 0;
1573 }
1574 
1575 static int process_build_id(struct perf_file_section *section,
1576                             struct perf_header *ph, int fd,
1577                             void *data __maybe_unused)
1578 {
1579         if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1580                 pr_debug("Failed to read buildids, continuing...\n");
1581         return 0;
1582 }
1583 
1584 static int process_hostname(struct perf_file_section *section __maybe_unused,
1585                             struct perf_header *ph, int fd,
1586                             void *data __maybe_unused)
1587 {
1588         ph->env.hostname = do_read_string(fd, ph);
1589         return ph->env.hostname ? 0 : -ENOMEM;
1590 }
1591 
1592 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1593                              struct perf_header *ph, int fd,
1594                              void *data __maybe_unused)
1595 {
1596         ph->env.os_release = do_read_string(fd, ph);
1597         return ph->env.os_release ? 0 : -ENOMEM;
1598 }
1599 
1600 static int process_version(struct perf_file_section *section __maybe_unused,
1601                            struct perf_header *ph, int fd,
1602                            void *data __maybe_unused)
1603 {
1604         ph->env.version = do_read_string(fd, ph);
1605         return ph->env.version ? 0 : -ENOMEM;
1606 }
1607 
1608 static int process_arch(struct perf_file_section *section __maybe_unused,
1609                         struct perf_header *ph, int fd,
1610                         void *data __maybe_unused)
1611 {
1612         ph->env.arch = do_read_string(fd, ph);
1613         return ph->env.arch ? 0 : -ENOMEM;
1614 }
1615 
1616 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1617                           struct perf_header *ph, int fd,
1618                           void *data __maybe_unused)
1619 {
1620         ssize_t ret;
1621         u32 nr;
1622 
1623         ret = readn(fd, &nr, sizeof(nr));
1624         if (ret != sizeof(nr))
1625                 return -1;
1626 
1627         if (ph->needs_swap)
1628                 nr = bswap_32(nr);
1629 
1630         ph->env.nr_cpus_avail = nr;
1631 
1632         ret = readn(fd, &nr, sizeof(nr));
1633         if (ret != sizeof(nr))
1634                 return -1;
1635 
1636         if (ph->needs_swap)
1637                 nr = bswap_32(nr);
1638 
1639         ph->env.nr_cpus_online = nr;
1640         return 0;
1641 }
1642 
1643 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1644                            struct perf_header *ph, int fd,
1645                            void *data __maybe_unused)
1646 {
1647         ph->env.cpu_desc = do_read_string(fd, ph);
1648         return ph->env.cpu_desc ? 0 : -ENOMEM;
1649 }
1650 
1651 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1652                          struct perf_header *ph,  int fd,
1653                          void *data __maybe_unused)
1654 {
1655         ph->env.cpuid = do_read_string(fd, ph);
1656         return ph->env.cpuid ? 0 : -ENOMEM;
1657 }
1658 
1659 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1660                              struct perf_header *ph, int fd,
1661                              void *data __maybe_unused)
1662 {
1663         uint64_t mem;
1664         ssize_t ret;
1665 
1666         ret = readn(fd, &mem, sizeof(mem));
1667         if (ret != sizeof(mem))
1668                 return -1;
1669 
1670         if (ph->needs_swap)
1671                 mem = bswap_64(mem);
1672 
1673         ph->env.total_mem = mem;
1674         return 0;
1675 }
1676 
1677 static struct perf_evsel *
1678 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1679 {
1680         struct perf_evsel *evsel;
1681 
1682         evlist__for_each_entry(evlist, evsel) {
1683                 if (evsel->idx == idx)
1684                         return evsel;
1685         }
1686 
1687         return NULL;
1688 }
1689 
1690 static void
1691 perf_evlist__set_event_name(struct perf_evlist *evlist,
1692                             struct perf_evsel *event)
1693 {
1694         struct perf_evsel *evsel;
1695 
1696         if (!event->name)
1697                 return;
1698 
1699         evsel = perf_evlist__find_by_index(evlist, event->idx);
1700         if (!evsel)
1701                 return;
1702 
1703         if (evsel->name)
1704                 return;
1705 
1706         evsel->name = strdup(event->name);
1707 }
1708 
1709 static int
1710 process_event_desc(struct perf_file_section *section __maybe_unused,
1711                    struct perf_header *header, int fd,
1712                    void *data __maybe_unused)
1713 {
1714         struct perf_session *session;
1715         struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1716 
1717         if (!events)
1718                 return 0;
1719 
1720         session = container_of(header, struct perf_session, header);
1721         for (evsel = events; evsel->attr.size; evsel++)
1722                 perf_evlist__set_event_name(session->evlist, evsel);
1723 
1724         free_event_desc(events);
1725 
1726         return 0;
1727 }
1728 
1729 static int process_cmdline(struct perf_file_section *section,
1730                            struct perf_header *ph, int fd,
1731                            void *data __maybe_unused)
1732 {
1733         ssize_t ret;
1734         char *str, *cmdline = NULL, **argv = NULL;
1735         u32 nr, i, len = 0;
1736 
1737         ret = readn(fd, &nr, sizeof(nr));
1738         if (ret != sizeof(nr))
1739                 return -1;
1740 
1741         if (ph->needs_swap)
1742                 nr = bswap_32(nr);
1743 
1744         ph->env.nr_cmdline = nr;
1745 
1746         cmdline = zalloc(section->size + nr + 1);
1747         if (!cmdline)
1748                 return -1;
1749 
1750         argv = zalloc(sizeof(char *) * (nr + 1));
1751         if (!argv)
1752                 goto error;
1753 
1754         for (i = 0; i < nr; i++) {
1755                 str = do_read_string(fd, ph);
1756                 if (!str)
1757                         goto error;
1758 
1759                 argv[i] = cmdline + len;
1760                 memcpy(argv[i], str, strlen(str) + 1);
1761                 len += strlen(str) + 1;
1762                 free(str);
1763         }
1764         ph->env.cmdline = cmdline;
1765         ph->env.cmdline_argv = (const char **) argv;
1766         return 0;
1767 
1768 error:
1769         free(argv);
1770         free(cmdline);
1771         return -1;
1772 }
1773 
1774 static int process_cpu_topology(struct perf_file_section *section,
1775                                 struct perf_header *ph, int fd,
1776                                 void *data __maybe_unused)
1777 {
1778         ssize_t ret;
1779         u32 nr, i;
1780         char *str;
1781         struct strbuf sb;
1782         int cpu_nr = ph->env.nr_cpus_online;
1783         u64 size = 0;
1784 
1785         ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1786         if (!ph->env.cpu)
1787                 return -1;
1788 
1789         ret = readn(fd, &nr, sizeof(nr));
1790         if (ret != sizeof(nr))
1791                 goto free_cpu;
1792 
1793         if (ph->needs_swap)
1794                 nr = bswap_32(nr);
1795 
1796         ph->env.nr_sibling_cores = nr;
1797         size += sizeof(u32);
1798         if (strbuf_init(&sb, 128) < 0)
1799                 goto free_cpu;
1800 
1801         for (i = 0; i < nr; i++) {
1802                 str = do_read_string(fd, ph);
1803                 if (!str)
1804                         goto error;
1805 
1806                 /* include a NULL character at the end */
1807                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1808                         goto error;
1809                 size += string_size(str);
1810                 free(str);
1811         }
1812         ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1813 
1814         ret = readn(fd, &nr, sizeof(nr));
1815         if (ret != sizeof(nr))
1816                 return -1;
1817 
1818         if (ph->needs_swap)
1819                 nr = bswap_32(nr);
1820 
1821         ph->env.nr_sibling_threads = nr;
1822         size += sizeof(u32);
1823 
1824         for (i = 0; i < nr; i++) {
1825                 str = do_read_string(fd, ph);
1826                 if (!str)
1827                         goto error;
1828 
1829                 /* include a NULL character at the end */
1830                 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1831                         goto error;
1832                 size += string_size(str);
1833                 free(str);
1834         }
1835         ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1836 
1837         /*
1838          * The header may be from old perf,
1839          * which doesn't include core id and socket id information.
1840          */
1841         if (section->size <= size) {
1842                 zfree(&ph->env.cpu);
1843                 return 0;
1844         }
1845 
1846         for (i = 0; i < (u32)cpu_nr; i++) {
1847                 ret = readn(fd, &nr, sizeof(nr));
1848                 if (ret != sizeof(nr))
1849                         goto free_cpu;
1850 
1851                 if (ph->needs_swap)
1852                         nr = bswap_32(nr);
1853 
1854                 ph->env.cpu[i].core_id = nr;
1855 
1856                 ret = readn(fd, &nr, sizeof(nr));
1857                 if (ret != sizeof(nr))
1858                         goto free_cpu;
1859 
1860                 if (ph->needs_swap)
1861                         nr = bswap_32(nr);
1862 
1863                 if (nr > (u32)cpu_nr) {
1864                         pr_debug("socket_id number is too big."
1865                                  "You may need to upgrade the perf tool.\n");
1866                         goto free_cpu;
1867                 }
1868 
1869                 ph->env.cpu[i].socket_id = nr;
1870         }
1871 
1872         return 0;
1873 
1874 error:
1875         strbuf_release(&sb);
1876 free_cpu:
1877         zfree(&ph->env.cpu);
1878         return -1;
1879 }
1880 
1881 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1882                                  struct perf_header *ph, int fd,
1883                                  void *data __maybe_unused)
1884 {
1885         struct numa_node *nodes, *n;
1886         ssize_t ret;
1887         u32 nr, i;
1888         char *str;
1889 
1890         /* nr nodes */
1891         ret = readn(fd, &nr, sizeof(nr));
1892         if (ret != sizeof(nr))
1893                 return -1;
1894 
1895         if (ph->needs_swap)
1896                 nr = bswap_32(nr);
1897 
1898         nodes = zalloc(sizeof(*nodes) * nr);
1899         if (!nodes)
1900                 return -ENOMEM;
1901 
1902         for (i = 0; i < nr; i++) {
1903                 n = &nodes[i];
1904 
1905                 /* node number */
1906                 ret = readn(fd, &n->node, sizeof(u32));
1907                 if (ret != sizeof(n->node))
1908                         goto error;
1909 
1910                 ret = readn(fd, &n->mem_total, sizeof(u64));
1911                 if (ret != sizeof(u64))
1912                         goto error;
1913 
1914                 ret = readn(fd, &n->mem_free, sizeof(u64));
1915                 if (ret != sizeof(u64))
1916                         goto error;
1917 
1918                 if (ph->needs_swap) {
1919                         n->node      = bswap_32(n->node);
1920                         n->mem_total = bswap_64(n->mem_total);
1921                         n->mem_free  = bswap_64(n->mem_free);
1922                 }
1923 
1924                 str = do_read_string(fd, ph);
1925                 if (!str)
1926                         goto error;
1927 
1928                 n->map = cpu_map__new(str);
1929                 if (!n->map)
1930                         goto error;
1931 
1932                 free(str);
1933         }
1934         ph->env.nr_numa_nodes = nr;
1935         ph->env.numa_nodes = nodes;
1936         return 0;
1937 
1938 error:
1939         free(nodes);
1940         return -1;
1941 }
1942 
1943 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1944                                 struct perf_header *ph, int fd,
1945                                 void *data __maybe_unused)
1946 {
1947         ssize_t ret;
1948         char *name;
1949         u32 pmu_num;
1950         u32 type;
1951         struct strbuf sb;
1952 
1953         ret = readn(fd, &pmu_num, sizeof(pmu_num));
1954         if (ret != sizeof(pmu_num))
1955                 return -1;
1956 
1957         if (ph->needs_swap)
1958                 pmu_num = bswap_32(pmu_num);
1959 
1960         if (!pmu_num) {
1961                 pr_debug("pmu mappings not available\n");
1962                 return 0;
1963         }
1964 
1965         ph->env.nr_pmu_mappings = pmu_num;
1966         if (strbuf_init(&sb, 128) < 0)
1967                 return -1;
1968 
1969         while (pmu_num) {
1970                 if (readn(fd, &type, sizeof(type)) != sizeof(type))
1971                         goto error;
1972                 if (ph->needs_swap)
1973                         type = bswap_32(type);
1974 
1975                 name = do_read_string(fd, ph);
1976                 if (!name)
1977                         goto error;
1978 
1979                 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1980                         goto error;
1981                 /* include a NULL character at the end */
1982                 if (strbuf_add(&sb, "", 1) < 0)
1983                         goto error;
1984 
1985                 if (!strcmp(name, "msr"))
1986                         ph->env.msr_pmu_type = type;
1987 
1988                 free(name);
1989                 pmu_num--;
1990         }
1991         ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
1992         return 0;
1993 
1994 error:
1995         strbuf_release(&sb);
1996         return -1;
1997 }
1998 
1999 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2000                               struct perf_header *ph, int fd,
2001                               void *data __maybe_unused)
2002 {
2003         size_t ret = -1;
2004         u32 i, nr, nr_groups;
2005         struct perf_session *session;
2006         struct perf_evsel *evsel, *leader = NULL;
2007         struct group_desc {
2008                 char *name;
2009                 u32 leader_idx;
2010                 u32 nr_members;
2011         } *desc;
2012 
2013         if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2014                 return -1;
2015 
2016         if (ph->needs_swap)
2017                 nr_groups = bswap_32(nr_groups);
2018 
2019         ph->env.nr_groups = nr_groups;
2020         if (!nr_groups) {
2021                 pr_debug("group desc not available\n");
2022                 return 0;
2023         }
2024 
2025         desc = calloc(nr_groups, sizeof(*desc));
2026         if (!desc)
2027                 return -1;
2028 
2029         for (i = 0; i < nr_groups; i++) {
2030                 desc[i].name = do_read_string(fd, ph);
2031                 if (!desc[i].name)
2032                         goto out_free;
2033 
2034                 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2035                         goto out_free;
2036 
2037                 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2038                         goto out_free;
2039 
2040                 if (ph->needs_swap) {
2041                         desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2042                         desc[i].nr_members = bswap_32(desc[i].nr_members);
2043                 }
2044         }
2045 
2046         /*
2047          * Rebuild group relationship based on the group_desc
2048          */
2049         session = container_of(ph, struct perf_session, header);
2050         session->evlist->nr_groups = nr_groups;
2051 
2052         i = nr = 0;
2053         evlist__for_each_entry(session->evlist, evsel) {
2054                 if (evsel->idx == (int) desc[i].leader_idx) {
2055                         evsel->leader = evsel;
2056                         /* {anon_group} is a dummy name */
2057                         if (strcmp(desc[i].name, "{anon_group}")) {
2058                                 evsel->group_name = desc[i].name;
2059                                 desc[i].name = NULL;
2060                         }
2061                         evsel->nr_members = desc[i].nr_members;
2062 
2063                         if (i >= nr_groups || nr > 0) {
2064                                 pr_debug("invalid group desc\n");
2065                                 goto out_free;
2066                         }
2067 
2068                         leader = evsel;
2069                         nr = evsel->nr_members - 1;
2070                         i++;
2071                 } else if (nr) {
2072                         /* This is a group member */
2073                         evsel->leader = leader;
2074 
2075                         nr--;
2076                 }
2077         }
2078 
2079         if (i != nr_groups || nr != 0) {
2080                 pr_debug("invalid group desc\n");
2081                 goto out_free;
2082         }
2083 
2084         ret = 0;
2085 out_free:
2086         for (i = 0; i < nr_groups; i++)
2087                 zfree(&desc[i].name);
2088         free(desc);
2089 
2090         return ret;
2091 }
2092 
2093 static int process_auxtrace(struct perf_file_section *section,
2094                             struct perf_header *ph, int fd,
2095                             void *data __maybe_unused)
2096 {
2097         struct perf_session *session;
2098         int err;
2099 
2100         session = container_of(ph, struct perf_session, header);
2101 
2102         err = auxtrace_index__process(fd, section->size, session,
2103                                       ph->needs_swap);
2104         if (err < 0)
2105                 pr_err("Failed to process auxtrace index\n");
2106         return err;
2107 }
2108 
2109 static int process_cache(struct perf_file_section *section __maybe_unused,
2110                          struct perf_header *ph __maybe_unused, int fd __maybe_unused,
2111                          void *data __maybe_unused)
2112 {
2113         struct cpu_cache_level *caches;
2114         u32 cnt, i, version;
2115 
2116         if (readn(fd, &version, sizeof(version)) != sizeof(version))
2117                 return -1;
2118 
2119         if (ph->needs_swap)
2120                 version = bswap_32(version);
2121 
2122         if (version != 1)
2123                 return -1;
2124 
2125         if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2126                 return -1;
2127 
2128         if (ph->needs_swap)
2129                 cnt = bswap_32(cnt);
2130 
2131         caches = zalloc(sizeof(*caches) * cnt);
2132         if (!caches)
2133                 return -1;
2134 
2135         for (i = 0; i < cnt; i++) {
2136                 struct cpu_cache_level c;
2137 
2138                 #define _R(v)                                           \
2139                         if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2140                                 goto out_free_caches;                   \
2141                         if (ph->needs_swap)                             \
2142                                 c.v = bswap_32(c.v);                    \
2143 
2144                 _R(level)
2145                 _R(line_size)
2146                 _R(sets)
2147                 _R(ways)
2148                 #undef _R
2149 
2150                 #define _R(v)                           \
2151                         c.v = do_read_string(fd, ph);   \
2152                         if (!c.v)                       \
2153                                 goto out_free_caches;
2154 
2155                 _R(type)
2156                 _R(size)
2157                 _R(map)
2158                 #undef _R
2159 
2160                 caches[i] = c;
2161         }
2162 
2163         ph->env.caches = caches;
2164         ph->env.caches_cnt = cnt;
2165         return 0;
2166 out_free_caches:
2167         free(caches);
2168         return -1;
2169 }
2170 
/*
 * Per-feature handlers for the optional sections appended to a
 * perf.data file.  A NULL callback means that operation is not
 * implemented for the feature.
 */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); /* serialize the feature at the current offset */
	void (*print)(struct perf_header *h, int fd, FILE *fp); /* pretty-print for header listings */
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int fd, void *data); /* parse the section when reading a file */
	const char *name; /* feature name, used in diagnostics */
	bool full_only; /* only printed with the extended (-I) listing */
};
2179 
/*
 * Declare a feature's handler set:
 *   FEAT_OPA - write/print only (no reader)
 *   FEAT_OPP - write/print/process
 *   FEAT_OPF - like FEAT_OPP, but printed only with the full (-I) listing
 */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

/* Table mapping each HEADER_* feature bit to its handlers. */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPP(HEADER_HOSTNAME,	hostname),
	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
	FEAT_OPP(HEADER_VERSION,	version),
	FEAT_OPP(HEADER_ARCH,		arch),
	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
	FEAT_OPP(HEADER_CPUID,		cpuid),
	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPP(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
	FEAT_OPP(HEADER_AUXTRACE,	auxtrace),
	FEAT_OPA(HEADER_STAT,		stat),
	FEAT_OPF(HEADER_CACHE,		cache),
};
2215 
/* Callback state threaded through perf_header__process_sections()
 * when printing header info. */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2220 
2221 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2222                                            struct perf_header *ph,
2223                                            int feat, int fd, void *data)
2224 {
2225         struct header_print_data *hd = data;
2226 
2227         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2228                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2229                                 "%d, continuing...\n", section->offset, feat);
2230                 return 0;
2231         }
2232         if (feat >= HEADER_LAST_FEATURE) {
2233                 pr_warning("unknown feature %d\n", feat);
2234                 return 0;
2235         }
2236         if (!feat_ops[feat].print)
2237                 return 0;
2238 
2239         if (!feat_ops[feat].full_only || hd->full)
2240                 feat_ops[feat].print(ph, fd, hd->fp);
2241         else
2242                 fprintf(hd->fp, "# %s info available, use -I to display\n",
2243                         feat_ops[feat].name);
2244 
2245         return 0;
2246 }
2247 
/*
 * Print the header information of 'session' to 'fp'.  With 'full' set,
 * sections marked full_only are expanded as well; otherwise they are
 * only advertised.  Finishes with the list of feature bits absent from
 * this file.  Returns 0 on success, -1 if the file cannot be stat'ed.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data_file__fd(session->file);
	struct stat st;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	/* the file's change time doubles as the capture timestamp */
	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit) /* bit 0 has no feat_ops name (presumably reserved) — skip */
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
2277 
2278 static int do_write_feat(int fd, struct perf_header *h, int type,
2279                          struct perf_file_section **p,
2280                          struct perf_evlist *evlist)
2281 {
2282         int err;
2283         int ret = 0;
2284 
2285         if (perf_header__has_feat(h, type)) {
2286                 if (!feat_ops[type].write)
2287                         return -1;
2288 
2289                 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2290 
2291                 err = feat_ops[type].write(fd, h, evlist);
2292                 if (err < 0) {
2293                         pr_debug("failed to write feature %s\n", feat_ops[type].name);
2294 
2295                         /* undo anything written */
2296                         lseek(fd, (*p)->offset, SEEK_SET);
2297 
2298                         return -1;
2299                 }
2300                 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2301                 (*p)++;
2302         }
2303         return ret;
2304 }
2305 
/*
 * Append all enabled feature sections after the data area, then write
 * the perf_file_section index describing them at header->feat_offset.
 * A feature whose writer fails is dropped from the bitmap rather than
 * aborting the whole header.  Returns 0 (or positive) on success, a
 * negative value on failure.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* skip over the section index; feature payloads are written first */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	/* now go back and write the index itself */
	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
2345 
2346 int perf_header__write_pipe(int fd)
2347 {
2348         struct perf_pipe_file_header f_header;
2349         int err;
2350 
2351         f_header = (struct perf_pipe_file_header){
2352                 .magic     = PERF_MAGIC,
2353                 .size      = sizeof(f_header),
2354         };
2355 
2356         err = do_write(fd, &f_header, sizeof(f_header));
2357         if (err < 0) {
2358                 pr_debug("failed to write perf pipe header\n");
2359                 return err;
2360         }
2361 
2362         return 0;
2363 }
2364 
/*
 * Write a complete perf.data file header:
 *   1. sample ids for each evsel (after the fixed header area),
 *   2. one perf_file_attr per evsel,
 *   3. optionally (at_exit) the feature sections past the data area,
 *   4. finally the fixed perf_file_header back at offset 0.
 * The file position is left at the end of the data area.  Returns 0 on
 * success, a negative value on write failure.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	u64 attr_offset;
	int err;

	/* reserve room for the fixed header, which is written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		/* remember where this evsel's ids land for the attr table */
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size	= evsel->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	/* data starts right after the attrs; features follow the data */
	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size	= evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* rewind and emit the fixed header now that all offsets are known */
	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
2441 
2442 static int perf_header__getbuffer64(struct perf_header *header,
2443                                     int fd, void *buf, size_t size)
2444 {
2445         if (readn(fd, buf, size) <= 0)
2446                 return -1;
2447 
2448         if (header->needs_swap)
2449                 mem_bswap_64(buf, size);
2450 
2451         return 0;
2452 }
2453 
/*
 * Read the feature-section index at header->feat_offset and invoke
 * 'process' once per set feature bit, in bit order, passing the
 * matching perf_file_section.  Stops at the first callback error.
 * Returns 0 on success (or nothing to do), negative on failure.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	/* one on-file section exists per set bit, known or not */
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/*
	 * only bits below HEADER_LAST_FEATURE are dispatched; bits a
	 * newer perf may have set beyond that come later in the index,
	 * so sec stays aligned with the features we do know about
	 */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
2492 
/*
 * On-file perf_event_attr size for each published ABI revision of the
 * legacy file format; zero-terminated.  Used to probe which revision
 * (and endianness) wrote a legacy perf.data file.
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
2501 
2502 /*
2503  * In the legacy file format, the magic number is not used to encode endianness.
2504  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2505  * on ABI revisions, we need to try all combinations for all endianness to
2506  * detect the endianness.
2507  */
2508 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2509 {
2510         uint64_t ref_size, attr_size;
2511         int i;
2512 
2513         for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2514                 ref_size = attr_file_abi_sizes[i]
2515                          + sizeof(struct perf_file_section);
2516                 if (hdr_sz != ref_size) {
2517                         attr_size = bswap_64(hdr_sz);
2518                         if (attr_size != ref_size)
2519                                 continue;
2520 
2521                         ph->needs_swap = true;
2522                 }
2523                 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2524                          i,
2525                          ph->needs_swap);
2526                 return 0;
2527         }
2528         /* could not determine endianness */
2529         return -1;
2530 }
2531 
/* size of the legacy pipe-mode header for ABI revision 0 */
#define PERF_PIPE_HDR_VER0	16

/* legacy pipe header size per ABI revision; zero-terminated */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
2538 
/*
 * In the legacy pipe format, there is an implicit assumption that endianness
 * between host recording the samples, and host parsing the samples is the
 * same. This is not always the case given that the pipe output may always be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revisions in the perf tool itself.
 */
2546 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2547 {
2548         u64 attr_size;
2549         int i;
2550 
2551         for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2552                 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2553                         attr_size = bswap_64(hdr_sz);
2554                         if (attr_size != hdr_sz)
2555                                 continue;
2556 
2557                         ph->needs_swap = true;
2558                 }
2559                 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2560                 return 0;
2561         }
2562         return -1;
2563 }
2564 
2565 bool is_perf_magic(u64 magic)
2566 {
2567         if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2568                 || magic == __perf_magic2
2569                 || magic == __perf_magic2_sw)
2570                 return true;
2571 
2572         return false;
2573 }
2574 
2575 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2576                               bool is_pipe, struct perf_header *ph)
2577 {
2578         int ret;
2579 
2580         /* check for legacy format */
2581         ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2582         if (ret == 0) {
2583                 ph->version = PERF_HEADER_VERSION_1;
2584                 pr_debug("legacy perf.data format\n");
2585                 if (is_pipe)
2586                         return try_all_pipe_abis(hdr_sz, ph);
2587 
2588                 return try_all_file_abis(hdr_sz, ph);
2589         }
2590         /*
2591          * the new magic number serves two purposes:
2592          * - unique number to identify actual perf.data files
2593          * - encode endianness of file
2594          */
2595         ph->version = PERF_HEADER_VERSION_2;
2596 
2597         /* check magic number with one endianness */
2598         if (magic == __perf_magic2)
2599                 return 0;
2600 
2601         /* check magic number with opposite endianness */
2602         if (magic != __perf_magic2_sw)
2603                 return -1;
2604 
2605         ph->needs_swap = true;
2606 
2607         return 0;
2608 }
2609 
/*
 * Read and validate the fixed perf_file_header at the start of fd,
 * filling in 'ph' (version, endianness, feature bitmap, data/feature
 * offsets).  Handles files written by hosts of either endianness and
 * either unsigned-long width.  Returns 0 on success, -1 on a short
 * read or an unrecognized header.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	/* swap the fixed u64 fields; adds_features is handled below */
	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	/* feature sections sit immediately after the data area */
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
2681 
2682 static int perf_file_section__process(struct perf_file_section *section,
2683                                       struct perf_header *ph,
2684                                       int feat, int fd, void *data)
2685 {
2686         if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2687                 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2688                           "%d, continuing...\n", section->offset, feat);
2689                 return 0;
2690         }
2691 
2692         if (feat >= HEADER_LAST_FEATURE) {
2693                 pr_debug("unknown feature %d, continuing...\n", feat);
2694                 return 0;
2695         }
2696 
2697         if (!feat_ops[feat].process)
2698                 return 0;
2699 
2700         return feat_ops[feat].process(section, ph, fd, data);
2701 }
2702 
2703 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2704                                        struct perf_header *ph, int fd,
2705                                        bool repipe)
2706 {
2707         ssize_t ret;
2708 
2709         ret = readn(fd, header, sizeof(*header));
2710         if (ret <= 0)
2711                 return -1;
2712 
2713         if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2714                 pr_debug("endian/magic failed\n");
2715                 return -1;
2716         }
2717 
2718         if (ph->needs_swap)
2719                 header->size = bswap_64(header->size);
2720 
2721         if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2722                 return -1;
2723 
2724         return 0;
2725 }
2726 
2727 static int perf_header__read_pipe(struct perf_session *session)
2728 {
2729         struct perf_header *header = &session->header;
2730         struct perf_pipe_file_header f_header;
2731 
2732         if (perf_file_header__read_pipe(&f_header, header,
2733                                         perf_data_file__fd(session->file),
2734                                         session->repipe) < 0) {
2735                 pr_debug("incompatible file format\n");
2736                 return -EINVAL;
2737         }
2738 
2739         return 0;
2740 }
2741 
2742 static int read_attr(int fd, struct perf_header *ph,
2743                      struct perf_file_attr *f_attr)
2744 {
2745         struct perf_event_attr *attr = &f_attr->attr;
2746         size_t sz, left;
2747         size_t our_sz = sizeof(f_attr->attr);
2748         ssize_t ret;
2749 
2750         memset(f_attr, 0, sizeof(*f_attr));
2751 
2752         /* read minimal guaranteed structure */
2753         ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2754         if (ret <= 0) {
2755                 pr_debug("cannot read %d bytes of header attr\n",
2756                          PERF_ATTR_SIZE_VER0);
2757                 return -1;
2758         }
2759 
2760         /* on file perf_event_attr size */
2761         sz = attr->size;
2762 
2763         if (ph->needs_swap)
2764                 sz = bswap_32(sz);
2765 
2766         if (sz == 0) {
2767                 /* assume ABI0 */
2768                 sz =  PERF_ATTR_SIZE_VER0;
2769         } else if (sz > our_sz) {
2770                 pr_debug("file uses a more recent and unsupported ABI"
2771                          " (%zu bytes extra)\n", sz - our_sz);
2772                 return -1;
2773         }
2774         /* what we have not yet read and that we know about */
2775         left = sz - PERF_ATTR_SIZE_VER0;
2776         if (left) {
2777                 void *ptr = attr;
2778                 ptr += PERF_ATTR_SIZE_VER0;
2779 
2780                 ret = readn(fd, ptr, left);
2781         }
2782         /* read perf_file_section, ids are read in caller */
2783         ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2784 
2785         return ret <= 0 ? -1 : 0;
2786 }
2787 
2788 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2789                                                 struct pevent *pevent)
2790 {
2791         struct event_format *event;
2792         char bf[128];
2793 
2794         /* already prepared */
2795         if (evsel->tp_format)
2796                 return 0;
2797 
2798         if (pevent == NULL) {
2799                 pr_debug("broken or missing trace data\n");
2800                 return -1;
2801         }
2802 
2803         event = pevent_find_event(pevent, evsel->attr.config);
2804         if (event == NULL)
2805                 return -1;
2806 
2807         if (!evsel->name) {
2808                 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2809                 evsel->name = strdup(bf);
2810                 if (evsel->name == NULL)
2811                         return -1;
2812         }
2813 
2814         evsel->tp_format = event;
2815         return 0;
2816 }
2817 
2818 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2819                                                   struct pevent *pevent)
2820 {
2821         struct perf_evsel *pos;
2822 
2823         evlist__for_each_entry(evlist, pos) {
2824                 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2825                     perf_evsel__prepare_tracepoint_event(pos, pevent))
2826                         return -1;
2827         }
2828 
2829         return 0;
2830 }
2831 
/*
 * Read the header of a perf.data file and populate session->evlist
 * with one evsel per on-file attribute, including their sample ids.
 *
 * Pipe-mode files have no seekable header and are delegated to
 * perf_header__read_pipe().  For regular files the attr array is
 * walked with lseek(), hopping out to each attr's id table and back.
 *
 * Returns 0 on success, -ENOMEM / -EINVAL / -errno on failure.
 */
int perf_session__read_header(struct perf_session *session)
{
        struct perf_data_file *file = session->file;
        struct perf_header *header = &session->header;
        struct perf_file_header f_header;
        struct perf_file_attr   f_attr;
        u64                     f_id;
        int nr_attrs, nr_ids, i, j;
        int fd = perf_data_file__fd(file);

        session->evlist = perf_evlist__new();
        if (session->evlist == NULL)
                return -ENOMEM;

        session->evlist->env = &header->env;
        session->machines.host.env = &header->env;
        /* streaming input: no fixed header to seek around in */
        if (perf_data_file__is_pipe(file))
                return perf_header__read_pipe(session);

        if (perf_file_header__read(&f_header, header, fd) < 0)
                return -EINVAL;

        /*
         * Sanity check that perf.data was written cleanly; data size is
         * initialized to 0 and updated only if the on_exit function is run.
         * If data size is still 0 then the file contains only partial
         * information.  Just warn user and process it as much as it can.
         */
        if (f_header.data.size == 0) {
                pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
                           "Was the 'perf record' command properly terminated?\n",
                           file->path);
        }

        /* attr_size is the on-file record size; it may differ from ours */
        nr_attrs = f_header.attrs.size / f_header.attr_size;
        lseek(fd, f_header.attrs.offset, SEEK_SET);

        for (i = 0; i < nr_attrs; i++) {
                struct perf_evsel *evsel;
                off_t tmp;

                if (read_attr(fd, header, &f_attr) < 0)
                        goto out_errno;

                if (header->needs_swap) {
                        f_attr.ids.size   = bswap_64(f_attr.ids.size);
                        f_attr.ids.offset = bswap_64(f_attr.ids.offset);
                        perf_event__attr_swap(&f_attr.attr);
                }

                /* remember the position of the next attr before detouring
                 * to this attr's id table */
                tmp = lseek(fd, 0, SEEK_CUR);
                evsel = perf_evsel__new(&f_attr.attr);

                if (evsel == NULL)
                        goto out_delete_evlist;

                evsel->needs_swap = header->needs_swap;
                /*
                 * Do it before so that if perf_evsel__alloc_id fails, this
                 * entry gets purged too at perf_evlist__delete().
                 */
                perf_evlist__add(session->evlist, evsel);

                nr_ids = f_attr.ids.size / sizeof(u64);
                /*
                 * We don't have the cpu and thread maps on the header, so
                 * for allocating the perf_sample_id table we fake 1 cpu and
                 * hattr->ids threads.
                 */
                if (perf_evsel__alloc_id(evsel, 1, nr_ids))
                        goto out_delete_evlist;

                lseek(fd, f_attr.ids.offset, SEEK_SET);

                for (j = 0; j < nr_ids; j++) {
                        if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
                                goto out_errno;

                        perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
                }

                /* return to the attr array for the next iteration */
                lseek(fd, tmp, SEEK_SET);
        }

        symbol_conf.nr_events = nr_attrs;

        /* walk the optional feature sections (build ids, tracing data, ...) */
        perf_header__process_sections(header, fd, &session->tevent,
                                      perf_file_section__process);

        if (perf_evlist__prepare_tracepoint_events(session->evlist,
                                                   session->tevent.pevent))
                goto out_delete_evlist;

        return 0;
out_errno:
        /* NOTE(review): assumes the failing read/seek left errno set — verify */
        return -errno;

out_delete_evlist:
        perf_evlist__delete(session->evlist);
        session->evlist = NULL;
        return -ENOMEM;
}
2934 
2935 int perf_event__synthesize_attr(struct perf_tool *tool,
2936                                 struct perf_event_attr *attr, u32 ids, u64 *id,
2937                                 perf_event__handler_t process)
2938 {
2939         union perf_event *ev;
2940         size_t size;
2941         int err;
2942 
2943         size = sizeof(struct perf_event_attr);
2944         size = PERF_ALIGN(size, sizeof(u64));
2945         size += sizeof(struct perf_event_header);
2946         size += ids * sizeof(u64);
2947 
2948         ev = malloc(size);
2949 
2950         if (ev == NULL)
2951                 return -ENOMEM;
2952 
2953         ev->attr.attr = *attr;
2954         memcpy(ev->attr.id, id, ids * sizeof(u64));
2955 
2956         ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2957         ev->attr.header.size = (u16)size;
2958 
2959         if (ev->attr.header.size == size)
2960                 err = process(tool, ev, NULL, NULL);
2961         else
2962                 err = -E2BIG;
2963 
2964         free(ev);
2965 
2966         return err;
2967 }
2968 
2969 static struct event_update_event *
2970 event_update_event__new(size_t size, u64 type, u64 id)
2971 {
2972         struct event_update_event *ev;
2973 
2974         size += sizeof(*ev);
2975         size  = PERF_ALIGN(size, sizeof(u64));
2976 
2977         ev = zalloc(size);
2978         if (ev) {
2979                 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2980                 ev->header.size = (u16)size;
2981                 ev->type = type;
2982                 ev->id = id;
2983         }
2984         return ev;
2985 }
2986 
2987 int
2988 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
2989                                          struct perf_evsel *evsel,
2990                                          perf_event__handler_t process)
2991 {
2992         struct event_update_event *ev;
2993         size_t size = strlen(evsel->unit);
2994         int err;
2995 
2996         ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
2997         if (ev == NULL)
2998                 return -ENOMEM;
2999 
3000         strncpy(ev->data, evsel->unit, size);
3001         err = process(tool, (union perf_event *)ev, NULL, NULL);
3002         free(ev);
3003         return err;
3004 }
3005 
3006 int
3007 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3008                                           struct perf_evsel *evsel,
3009                                           perf_event__handler_t process)
3010 {
3011         struct event_update_event *ev;
3012         struct event_update_event_scale *ev_data;
3013         int err;
3014 
3015         ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3016         if (ev == NULL)
3017                 return -ENOMEM;
3018 
3019         ev_data = (struct event_update_event_scale *) ev->data;
3020         ev_data->scale = evsel->scale;
3021         err = process(tool, (union perf_event*) ev, NULL, NULL);
3022         free(ev);
3023         return err;
3024 }
3025 
3026 int
3027 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3028                                          struct perf_evsel *evsel,
3029                                          perf_event__handler_t process)
3030 {
3031         struct event_update_event *ev;
3032         size_t len = strlen(evsel->name);
3033         int err;
3034 
3035         ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3036         if (ev == NULL)
3037                 return -ENOMEM;
3038 
3039         strncpy(ev->data, evsel->name, len);
3040         err = process(tool, (union perf_event*) ev, NULL, NULL);
3041         free(ev);
3042         return err;
3043 }
3044 
3045 int
3046 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3047                                         struct perf_evsel *evsel,
3048                                         perf_event__handler_t process)
3049 {
3050         size_t size = sizeof(struct event_update_event);
3051         struct event_update_event *ev;
3052         int max, err;
3053         u16 type;
3054 
3055         if (!evsel->own_cpus)
3056                 return 0;
3057 
3058         ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3059         if (!ev)
3060                 return -ENOMEM;
3061 
3062         ev->header.type = PERF_RECORD_EVENT_UPDATE;
3063         ev->header.size = (u16)size;
3064         ev->type = PERF_EVENT_UPDATE__CPUS;
3065         ev->id   = evsel->id[0];
3066 
3067         cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3068                                  evsel->own_cpus,
3069                                  type, max);
3070 
3071         err = process(tool, (union perf_event*) ev, NULL, NULL);
3072         free(ev);
3073         return err;
3074 }
3075 
3076 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3077 {
3078         struct event_update_event *ev = &event->event_update;
3079         struct event_update_event_scale *ev_scale;
3080         struct event_update_event_cpus *ev_cpus;
3081         struct cpu_map *map;
3082         size_t ret;
3083 
3084         ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3085 
3086         switch (ev->type) {
3087         case PERF_EVENT_UPDATE__SCALE:
3088                 ev_scale = (struct event_update_event_scale *) ev->data;
3089                 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3090                 break;
3091         case PERF_EVENT_UPDATE__UNIT:
3092                 ret += fprintf(fp, "... unit:  %s\n", ev->data);
3093                 break;
3094         case PERF_EVENT_UPDATE__NAME:
3095                 ret += fprintf(fp, "... name:  %s\n", ev->data);
3096                 break;
3097         case PERF_EVENT_UPDATE__CPUS:
3098                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3099                 ret += fprintf(fp, "... ");
3100 
3101                 map = cpu_map__new_data(&ev_cpus->cpus);
3102                 if (map)
3103                         ret += cpu_map__fprintf(map, fp);
3104                 else
3105                         ret += fprintf(fp, "failed to get cpus\n");
3106                 break;
3107         default:
3108                 ret += fprintf(fp, "... unknown type\n");
3109                 break;
3110         }
3111 
3112         return ret;
3113 }
3114 
3115 int perf_event__synthesize_attrs(struct perf_tool *tool,
3116                                    struct perf_session *session,
3117                                    perf_event__handler_t process)
3118 {
3119         struct perf_evsel *evsel;
3120         int err = 0;
3121 
3122         evlist__for_each_entry(session->evlist, evsel) {
3123                 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3124                                                   evsel->id, process);
3125                 if (err) {
3126                         pr_debug("failed to create perf header attribute\n");
3127                         return err;
3128                 }
3129         }
3130 
3131         return err;
3132 }
3133 
3134 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3135                              union perf_event *event,
3136                              struct perf_evlist **pevlist)
3137 {
3138         u32 i, ids, n_ids;
3139         struct perf_evsel *evsel;
3140         struct perf_evlist *evlist = *pevlist;
3141 
3142         if (evlist == NULL) {
3143                 *pevlist = evlist = perf_evlist__new();
3144                 if (evlist == NULL)
3145                         return -ENOMEM;
3146         }
3147 
3148         evsel = perf_evsel__new(&event->attr.attr);
3149         if (evsel == NULL)
3150                 return -ENOMEM;
3151 
3152         perf_evlist__add(evlist, evsel);
3153 
3154         ids = event->header.size;
3155         ids -= (void *)&event->attr.id - (void *)event;
3156         n_ids = ids / sizeof(u64);
3157         /*
3158          * We don't have the cpu and thread maps on the header, so
3159          * for allocating the perf_sample_id table we fake 1 cpu and
3160          * hattr->ids threads.
3161          */
3162         if (perf_evsel__alloc_id(evsel, 1, n_ids))
3163                 return -ENOMEM;
3164 
3165         for (i = 0; i < n_ids; i++) {
3166                 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3167         }
3168 
3169         symbol_conf.nr_events = evlist->nr_entries;
3170 
3171         return 0;
3172 }
3173 
3174 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3175                                      union perf_event *event,
3176                                      struct perf_evlist **pevlist)
3177 {
3178         struct event_update_event *ev = &event->event_update;
3179         struct event_update_event_scale *ev_scale;
3180         struct event_update_event_cpus *ev_cpus;
3181         struct perf_evlist *evlist;
3182         struct perf_evsel *evsel;
3183         struct cpu_map *map;
3184 
3185         if (!pevlist || *pevlist == NULL)
3186                 return -EINVAL;
3187 
3188         evlist = *pevlist;
3189 
3190         evsel = perf_evlist__id2evsel(evlist, ev->id);
3191         if (evsel == NULL)
3192                 return -EINVAL;
3193 
3194         switch (ev->type) {
3195         case PERF_EVENT_UPDATE__UNIT:
3196                 evsel->unit = strdup(ev->data);
3197                 break;
3198         case PERF_EVENT_UPDATE__NAME:
3199                 evsel->name = strdup(ev->data);
3200                 break;
3201         case PERF_EVENT_UPDATE__SCALE:
3202                 ev_scale = (struct event_update_event_scale *) ev->data;
3203                 evsel->scale = ev_scale->scale;
3204         case PERF_EVENT_UPDATE__CPUS:
3205                 ev_cpus = (struct event_update_event_cpus *) ev->data;
3206 
3207                 map = cpu_map__new_data(&ev_cpus->cpus);
3208                 if (map)
3209                         evsel->own_cpus = map;
3210                 else
3211                         pr_err("failed to get event_update cpus\n");
3212         default:
3213                 break;
3214         }
3215 
3216         return 0;
3217 }
3218 
3219 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3220                                         struct perf_evlist *evlist,
3221                                         perf_event__handler_t process)
3222 {
3223         union perf_event ev;
3224         struct tracing_data *tdata;
3225         ssize_t size = 0, aligned_size = 0, padding;
3226         int err __maybe_unused = 0;
3227 
3228         /*
3229          * We are going to store the size of the data followed
3230          * by the data contents. Since the fd descriptor is a pipe,
3231          * we cannot seek back to store the size of the data once
3232          * we know it. Instead we:
3233          *
3234          * - write the tracing data to the temp file
3235          * - get/write the data size to pipe
3236          * - write the tracing data from the temp file
3237          *   to the pipe
3238          */
3239         tdata = tracing_data_get(&evlist->entries, fd, true);
3240         if (!tdata)
3241                 return -1;
3242 
3243         memset(&ev, 0, sizeof(ev));
3244 
3245         ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3246         size = tdata->size;
3247         aligned_size = PERF_ALIGN(size, sizeof(u64));
3248         padding = aligned_size - size;
3249         ev.tracing_data.header.size = sizeof(ev.tracing_data);
3250         ev.tracing_data.size = aligned_size;
3251 
3252         process(tool, &ev, NULL, NULL);
3253 
3254         /*
3255          * The put function will copy all the tracing data
3256          * stored in temp file to the pipe.
3257          */
3258         tracing_data_put(tdata);
3259 
3260         write_padded(fd, NULL, 0, padding);
3261 
3262         return aligned_size;
3263 }
3264 
/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA: parse the tracing data that
 * follows the event in the file, consume the u64-alignment padding and,
 * in repipe mode, forward that padding to stdout.
 *
 * Returns the number of bytes consumed (data + padding) or -1 on a
 * read/size-mismatch error.
 */
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_session *session)
{
        ssize_t size_read, padding, size = event->tracing_data.size;
        int fd = perf_data_file__fd(session->file);
        off_t offset = lseek(fd, 0, SEEK_CUR);
        char buf[BUFSIZ];

        /* setup for reading amidst mmap */
        lseek(fd, offset + sizeof(struct tracing_data_event),
              SEEK_SET);

        size_read = trace_report(fd, &session->tevent,
                                 session->repipe);
        /* padding is at most sizeof(u64)-1, so buf is always large enough */
        padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

        if (readn(fd, buf, padding) < 0) {
                pr_err("%s: reading input file", __func__);
                return -1;
        }
        if (session->repipe) {
                /* forward the padding so the downstream stream stays aligned */
                int retw = write(STDOUT_FILENO, buf, padding);
                if (retw <= 0 || retw != padding) {
                        pr_err("%s: repiping tracing data padding", __func__);
                        return -1;
                }
        }

        /* the event announced an aligned size; verify we consumed exactly it */
        if (size_read + padding != size) {
                pr_err("%s: tracing data size mismatch", __func__);
                return -1;
        }

        /* tracepoint formats are only resolvable now that pevent exists */
        perf_evlist__prepare_tracepoint_events(session->evlist,
                                               session->tevent.pevent);

        return size_read + padding;
}
3304 
3305 int perf_event__synthesize_build_id(struct perf_tool *tool,
3306                                     struct dso *pos, u16 misc,
3307                                     perf_event__handler_t process,
3308                                     struct machine *machine)
3309 {
3310         union perf_event ev;
3311         size_t len;
3312         int err = 0;
3313 
3314         if (!pos->hit)
3315                 return err;
3316 
3317         memset(&ev, 0, sizeof(ev));
3318 
3319         len = pos->long_name_len + 1;
3320         len = PERF_ALIGN(len, NAME_ALIGN);
3321         memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3322         ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3323         ev.build_id.header.misc = misc;
3324         ev.build_id.pid = machine->pid;
3325         ev.build_id.header.size = sizeof(ev.build_id) + len;
3326         memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3327 
3328         err = process(tool, &ev, NULL, machine);
3329 
3330         return err;
3331 }
3332 
3333 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3334                                  union perf_event *event,
3335                                  struct perf_session *session)
3336 {
3337         __event_process_build_id(&event->build_id,
3338                                  event->build_id.filename,
3339                                  session);
3340         return 0;
3341 }
3342 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us