// gator: Version 5.18
// [firefly-linux-kernel-4.4.55.git] / tools / gator / daemon / PerfSource.cpp
1 /**
2  * Copyright (C) ARM Limited 2010-2014. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  */
8
9 #include "PerfSource.h"
10
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
14
15 #include "Child.h"
16 #include "DynBuf.h"
17 #include "Logging.h"
18 #include "PerfDriver.h"
19 #include "Proc.h"
20 #include "SessionData.h"
21
22 #define MS_PER_US 1000000
23
24 extern Child *child;
25
26 static bool sendTracepointFormat(Buffer *const buffer, const char *const name, DynBuf *const printb, DynBuf *const b) {
27         if (!printb->printf(EVENTS_PATH "/%s/format", name)) {
28                 logg->logMessage("%s(%s:%i): DynBuf::printf failed", __FUNCTION__, __FILE__, __LINE__);
29                 return false;
30         }
31         if (!b->read(printb->getBuf())) {
32                 logg->logMessage("%s(%s:%i): DynBuf::read failed", __FUNCTION__, __FILE__, __LINE__);
33                 return false;
34         }
35         buffer->format(b->getLength(), b->getBuf());
36
37         return true;
38 }
39
40 PerfSource::PerfSource(sem_t *senderSem, sem_t *startProfile) : mSummary(0, FRAME_SUMMARY, 1024, senderSem), mBuffer(0, FRAME_PERF_ATTRS, 1024*1024, senderSem), mCountersBuf(), mCountersGroup(&mCountersBuf), mMonitor(), mUEvent(), mSenderSem(senderSem), mStartProfile(startProfile), mInterruptFd(-1), mIsDone(false) {
41         long l = sysconf(_SC_PAGE_SIZE);
42         if (l < 0) {
43                 logg->logError(__FILE__, __LINE__, "Unable to obtain the page size");
44                 handleException();
45         }
46         gSessionData->mPageSize = static_cast<int>(l);
47
48         l = sysconf(_SC_NPROCESSORS_CONF);
49         if (l < 0) {
50                 logg->logError(__FILE__, __LINE__, "Unable to obtain the number of cores");
51                 handleException();
52         }
53         gSessionData->mCores = static_cast<int>(l);
54 }
55
// Nothing to release explicitly; members clean up in their own destructors
PerfSource::~PerfSource() {
}
58
// Argument bundle handed to each prepareParallel worker thread; must stay
// alive until the thread has been joined
struct PrepareParallelArgs {
	PerfGroup *pg;  // counter group whose prepareCPU is invoked
	int cpu;        // core index to prepare
};
63
64 void *prepareParallel(void *arg) {
65         const PrepareParallelArgs *const args = (PrepareParallelArgs *)arg;
66         args->pg->prepareCPU(args->cpu);
67         return NULL;
68 }
69
70 bool PerfSource::prepare() {
71         DynBuf printb;
72         DynBuf b1;
73         DynBuf b2;
74         DynBuf b3;
75         long long schedSwitchId;
76
77         if (0
78                         || !mMonitor.init()
79                         || !mUEvent.init()
80                         || !mMonitor.add(mUEvent.getFd())
81
82                         || (schedSwitchId = PerfDriver::getTracepointId(SCHED_SWITCH, &printb)) < 0
83                         || !sendTracepointFormat(&mBuffer, SCHED_SWITCH, &printb, &b1)
84
85                         // Only want RAW but not IP on sched_switch and don't want TID on SAMPLE_ID
86                         || !mCountersGroup.add(&mBuffer, 100/**/, PERF_TYPE_TRACEPOINT, schedSwitchId, 1, PERF_SAMPLE_RAW, PERF_GROUP_MMAP | PERF_GROUP_COMM | PERF_GROUP_TASK | PERF_GROUP_SAMPLE_ID_ALL)
87
88                         // Only want TID and IP but not RAW on timer
89                         || (gSessionData->mSampleRate > 0 && !gSessionData->mIsEBS && !mCountersGroup.add(&mBuffer, 99/**/, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK, 1000000000UL / gSessionData->mSampleRate, PERF_SAMPLE_TID | PERF_SAMPLE_IP, 0))
90
91                         || !gSessionData->perf.enable(&mCountersGroup, &mBuffer)
92                         || 0) {
93                 logg->logMessage("%s(%s:%i): perf setup failed, are you running Linux 3.12 or later?", __FUNCTION__, __FILE__, __LINE__);
94                 return false;
95         }
96
97         if (!gSessionData->perf.summary(&mSummary)) {
98                 logg->logMessage("%s(%s:%i): PerfDriver::summary failed", __FUNCTION__, __FILE__, __LINE__);
99                 return false;
100         }
101
102         {
103                 // Run prepareCPU in parallel as perf_event_open can take more than 1 sec in some cases
104                 pthread_t threads[NR_CPUS];
105                 PrepareParallelArgs args[NR_CPUS];
106                 for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
107                         args[cpu].pg = &mCountersGroup;
108                         args[cpu].cpu = cpu;
109                         if (pthread_create(&threads[cpu], NULL, prepareParallel, &args[cpu]) != 0) {
110                                 logg->logMessage("%s(%s:%i): pthread_create failed", __FUNCTION__, __FILE__, __LINE__);
111                                 return false;
112                         }
113                 }
114                 for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
115                         if (pthread_join(threads[cpu], NULL) != 0) {
116                                 logg->logMessage("%s(%s:%i): pthread_join failed", __FUNCTION__, __FILE__, __LINE__);
117                                 return false;
118                         }
119                 }
120         }
121
122         int numEvents = 0;
123         for (int cpu = 0; cpu < gSessionData->mCores; ++cpu) {
124                 numEvents += mCountersGroup.onlineCPU(cpu, false, &mBuffer, &mMonitor);
125         }
126         if (numEvents <= 0) {
127                 logg->logMessage("%s(%s:%i): PerfGroup::onlineCPU failed on all cores", __FUNCTION__, __FILE__, __LINE__);
128                 return false;
129         }
130
131         // Start events before reading proc to avoid race conditions
132         if (!mCountersGroup.start()) {
133                 logg->logMessage("%s(%s:%i): PerfGroup::start failed", __FUNCTION__, __FILE__, __LINE__);
134                 return false;
135         }
136
137         if (!readProc(&mBuffer, &printb, &b1, &b2, &b3)) {
138                 logg->logMessage("%s(%s:%i): readProc failed", __FUNCTION__, __FILE__, __LINE__);
139                 return false;
140         }
141
142         mBuffer.commit(1);
143
144         return true;
145 }
146
147 static const char CPU_DEVPATH[] = "/devices/system/cpu/cpu";
148
// Main capture loop: waits on the epoll monitor for perf data, uevents and
// interrupt-pipe wakeups until the session is stopped, then tears down the
// counter group and marks the attribute buffer done.
void PerfSource::run() {
	int pipefd[2];

	// Self-pipe: interrupt() writes to pipefd[1] to wake the epoll wait below
	if (pipe(pipefd) != 0) {
		logg->logError(__FILE__, __LINE__, "pipe failed");
		handleException();
	}
	mInterruptFd = pipefd[1];

	if (!mMonitor.add(pipefd[0])) {
		logg->logError(__FILE__, __LINE__, "Monitor::add failed");
		handleException();
	}

	// In live mode wake up periodically so buffered data gets flushed; the
	// 1e6 divisor suggests mLiveRate is in nanoseconds and the epoll timeout
	// is in milliseconds -- NOTE(review): MS_PER_US looks misnamed (NS_PER_MS)
	int timeout = -1;
	if (gSessionData->mLiveRate > 0) {
		timeout = gSessionData->mLiveRate/MS_PER_US;
	}

	// Unblock the thread waiting for profiling to begin
	sem_post(mStartProfile);

	while (gSessionData->mSessionIsActive) {
		// +1 for uevents, +1 for pipe
		struct epoll_event events[NR_CPUS + 2];
		int ready = mMonitor.wait(events, ARRAY_LENGTH(events), timeout);
		if (ready < 0) {
			logg->logError(__FILE__, __LINE__, "Monitor::wait failed");
			handleException();
		}

		// Only the uevent fd needs explicit handling here; at most one
		// uevent is processed per wakeup (break after the first match)
		for (int i = 0; i < ready; ++i) {
			if (events[i].data.fd == mUEvent.getFd()) {
				if (!handleUEvent()) {
					logg->logError(__FILE__, __LINE__, "PerfSource::handleUEvent failed");
					handleException();
				}
				break;
			}
		}

		// send a notification that data is ready
		sem_post(mSenderSem);

		// In one shot mode, stop collection once all the buffers are filled
		// Assume timeout == 0 in this case
		if (gSessionData->mOneShot && gSessionData->mSessionIsActive) {
			logg->logMessage("%s(%s:%i): One shot", __FUNCTION__, __FILE__, __LINE__);
			child->endSession();
		}
	}

	mCountersGroup.stop();
	mBuffer.setDone();
	mIsDone = true;

	// send a notification that data is ready
	sem_post(mSenderSem);

	// Invalidate the interrupt fd before closing so a late interrupt() call
	// sees -1 rather than a closed descriptor
	mInterruptFd = -1;
	close(pipefd[0]);
	close(pipefd[1]);
}
211
212 bool PerfSource::handleUEvent() {
213         UEventResult result;
214         if (!mUEvent.read(&result)) {
215                 logg->logMessage("%s(%s:%i): UEvent::Read failed", __FUNCTION__, __FILE__, __LINE__);
216                 return false;
217         }
218
219         if (strcmp(result.mSubsystem, "cpu") == 0) {
220                 if (strncmp(result.mDevPath, CPU_DEVPATH, sizeof(CPU_DEVPATH) - 1) != 0) {
221                         logg->logMessage("%s(%s:%i): Unexpected cpu DEVPATH format", __FUNCTION__, __FILE__, __LINE__);
222                         return false;
223                 }
224                 char *endptr;
225                 errno = 0;
226                 int cpu = strtol(result.mDevPath + sizeof(CPU_DEVPATH) - 1, &endptr, 10);
227                 if (errno != 0 || *endptr != '\0') {
228                         logg->logMessage("%s(%s:%i): strtol failed", __FUNCTION__, __FILE__, __LINE__);
229                         return false;
230                 }
231                 if (strcmp(result.mAction, "online") == 0) {
232                         // Only call onlineCPU if prepareCPU succeeded
233                         const bool result = mCountersGroup.prepareCPU(cpu) &&
234                                 mCountersGroup.onlineCPU(cpu, true, &mBuffer, &mMonitor);
235                         mBuffer.commit(1);
236                         return result;
237                 } else if (strcmp(result.mAction, "offline") == 0) {
238                         return mCountersGroup.offlineCPU(cpu);
239                 }
240         }
241
242         return true;
243 }
244
245 void PerfSource::interrupt() {
246         if (mInterruptFd >= 0) {
247                 int8_t c = 0;
248                 // Write to the pipe to wake the monitor which will cause mSessionIsActive to be reread
249                 if (::write(mInterruptFd, &c, sizeof(c)) != sizeof(c)) {
250                         logg->logError(__FILE__, __LINE__, "write failed");
251                         handleException();
252                 }
253         }
254 }
255
256 bool PerfSource::isDone () {
257         return mBuffer.isDone() && mIsDone && mCountersBuf.isEmpty();
258 }
259
260 void PerfSource::write (Sender *sender) {
261         if (!mSummary.isDone()) {
262                 mSummary.write(sender);
263         }
264         if (!mBuffer.isDone()) {
265                 mBuffer.write(sender);
266         }
267         if (!mCountersBuf.send(sender)) {
268                 logg->logError(__FILE__, __LINE__, "PerfBuffer::send failed");
269                 handleException();
270         }
271 }