*/
#define pr_fmt(fmt) "hw perfevents: " fmt
-#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -ENOENT;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
- default:
- if (event->attr.type >= PERF_TYPE_MAX)
- return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
- return;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
- return;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
- return;
-
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
int idx;
int err = 0;
- /* An event following a process won't be stopped earlier */
- if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
- return 0;
-
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
- if (is_software_event(event))
- return 1;
-
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
struct arm_pmu *armpmu = (struct arm_pmu *) dev;
struct platform_device *plat_device = armpmu->plat_device;
struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
- int ret;
- u64 start_clock, finish_clock;
- start_clock = sched_clock();
if (plat && plat->handle_irq)
- ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
+ return plat->handle_irq(irq, dev, armpmu->handle_irq);
else
- ret = armpmu->handle_irq(irq, dev);
- finish_clock = sched_clock();
-
- perf_sample_event_took(finish_clock - start_clock);
- return ret;
+ return armpmu->handle_irq(irq, dev);
}
static void
int err = 0;
atomic_t *active_events = &armpmu->active_events;
- if (event->cpu != -1 &&
- !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
- return -ENOENT;
-
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
return;
}
- perf_callchain_store(entry, regs->ARM_pc);
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&