- Root directory:
- arch
- arm
- kernel
- etm.c
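/*
 * Driver for ARM's Embedded Trace Macrocell (ETM) and Embedded Trace
 * Buffer (ETB) CoreSight blocks.
 *
 * Tracing is controlled through sysfs attributes created on the ETM
 * device (trace_running, trace_mode, trace_range, ...), captured trace
 * data is read from the "tracebuf" misc device, and the ETB contents can
 * be dumped to the kernel log with the 'v' sysrq key.
 */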
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/amba/bus.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <asm/hardware/coresight.h>
#include <asm/sections.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shishkin");
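/*
 * Driver state: the ETB and per-ETM register mappings, trace
 * configuration flags and the address ranges to trace. Access is
 * serialized by @mutex.
 */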
struct tracectx {
unsigned int etb_bufsz;
void __iomem *etb_regs;
void __iomem **etm_regs;
int etm_regs_count;
unsigned long flags;
int ncmppairs;
int etm_portsz;
int etm_contextid_size;
u32 etb_fc;
unsigned long range_start;
unsigned long range_end;
unsigned long data_range_start;
unsigned long data_range_end;
bool dump_initial_etb;
struct device *dev;
struct clk *emu_clk;
struct mutex mutex;
};
static struct tracectx tracer = {
.range_start = (unsigned long)_stext,
.range_end = (unsigned long)_etext,
};
static inline bool trace_isrunning(struct tracectx *t)
{
return !!(t->flags & TRACER_RUNNING);
}
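/*
 * Program address comparator pair @n (1-based) of ETM @id with the given
 * start/end addresses, then enable it for instruction tracing or, if
 * @data is set, for data address tracing, in include or exclude mode.
 */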
static int etm_setup_address_range(struct tracectx *t, int id, int n,
unsigned long start, unsigned long end, int exclude, int data)
{
u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
ETMAAT_NOVALCMP;
if (n < 1 || n > t->ncmppairs)
return -EINVAL;
n--;
if (data)
flags |= ETMAAT_DLOADSTORE;
else
flags |= ETMAAT_IEXEC;
etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
if (data) {
flags = exclude ? ETMVDC3_EXCLONLY : 0;
if (exclude)
n += 8;
etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
} else {
flags = exclude ? ETMTE_INCLEXCL : 0;
etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
}
return 0;
}
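/*
 * Configure one ETM and start it: enter programming mode, set up the
 * control register, trace-enable and view-data logic, then clear the
 * programming bit so the ETM begins emitting trace.
 */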
static int trace_start_etm(struct tracectx *t, int id)
{
u32 v;
unsigned long timeout = TRACER_TIMEOUT;
v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
if (t->flags & TRACER_CYCLE_ACC)
v |= ETMCTRL_CYCLEACCURATE;
if (t->flags & TRACER_BRANCHOUTPUT)
v |= ETMCTRL_BRANCH_OUTPUT;
if (t->flags & TRACER_TRACE_DATA)
v |= ETMCTRL_DATA_DO_ADDR;
if (t->flags & TRACER_TIMESTAMP)
v |= ETMCTRL_TIMESTAMP_EN;
if (t->flags & TRACER_RETURN_STACK)
v |= ETMCTRL_RETURN_STACK_EN;
etm_unlock(t, id);
etm_writel(t, id, v, ETMR_CTRL);
while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
etm_lock(t, id);
return -EFAULT;
}
if (t->range_start || t->range_end)
etm_setup_address_range(t, id, 1,
t->range_start, t->range_end, 0, 0);
else
etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
etm_writel(t, id, 0, ETMR_TRACESSCTRL);
etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
if (t->data_range_start || t->data_range_end)
etm_setup_address_range(t, id, 2, t->data_range_start,
t->data_range_end, 0, 1);
else
etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
v &= ~ETMCTRL_PROGRAM;
v |= ETMCTRL_PORTSEL;
etm_writel(t, id, v, ETMR_CTRL);
timeout = TRACER_TIMEOUT;
while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
etm_lock(t, id);
return -EFAULT;
}
etm_lock(t, id);
return 0;
}
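/* Reset and enable the ETB, then start every registered ETM. */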
static int trace_start(struct tracectx *t)
{
int ret;
int id;
u32 etb_fc = t->etb_fc;
etb_unlock(t);
t->dump_initial_etb = false;
etb_writel(t, 0, ETBR_WRITEADDR);
etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
etb_writel(t, 1, ETBR_CTRL);
etb_lock(t);
for (id = 0; id < t->etm_regs_count; id++) {
ret = trace_start_etm(t, id);
if (ret)
return ret;
}
t->flags |= TRACER_RUNNING;
return 0;
}
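/*
 * Halt tracing on ETM @id by setting the programming bit and waiting for
 * it to take effect.
 */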
static int trace_stop_etm(struct tracectx *t, int id)
{
unsigned long timeout = TRACER_TIMEOUT;
etm_unlock(t, id);
etm_writel(t, id, 0x440, ETMR_CTRL);
while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
;
if (!timeout) {
dev_err(t->dev,
"etm%d: Waiting for progbit to assert timed out\n",
id);
etm_lock(t, id);
return -EFAULT;
}
etm_lock(t, id);
return 0;
}
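/*
 * Wait for ETM @id to report the programming bit in its status register,
 * then power the macrocell down.
 */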
static int trace_power_down_etm(struct tracectx *t, int id)
{
unsigned long timeout = TRACER_TIMEOUT;
etm_unlock(t, id);
while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
;
if (!timeout) {
dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
id);
etm_lock(t, id);
return -EFAULT;
}
etm_writel(t, id, 0x441, ETMR_CTRL);
etm_lock(t, id);
return 0;
}
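/*
 * Stop and power down all ETMs, then flush the ETB formatter and disable
 * the buffer.
 */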
static int trace_stop(struct tracectx *t)
{
int id;
unsigned long timeout = TRACER_TIMEOUT;
u32 etb_fc = t->etb_fc;
for (id = 0; id < t->etm_regs_count; id++)
trace_stop_etm(t, id);
for (id = 0; id < t->etm_regs_count; id++)
trace_power_down_etm(t, id);
etb_unlock(t);
if (etb_fc) {
etb_fc |= ETBFF_STOPFL;
etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
}
etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
timeout = TRACER_TIMEOUT;
while (etb_readl(t, ETBR_FORMATTERCTRL) &
ETBFF_MANUAL_FLUSH && --timeout)
;
if (!timeout) {
dev_dbg(t->dev, "Waiting for formatter flush to commence "
"timed out\n");
etb_lock(t);
return -EFAULT;
}
etb_writel(t, 0, ETBR_CTRL);
etb_lock(t);
t->flags &= ~TRACER_RUNNING;
return 0;
}
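/*
 * Number of valid 32-bit words in the ETB: the full buffer depth if the
 * RAM-full bit is set, otherwise the current write pointer.
 */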
static int etb_getdatalen(struct tracectx *t)
{
u32 v;
int wp;
v = etb_readl(t, ETBR_STATUS);
if (v & 1)
return t->etb_bufsz;
wp = etb_readl(t, ETBR_WRITEADDR);
return wp;
}
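/* Dump the ETB contents to the kernel log (used by the sysrq handler). */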
static void etm_dump(void)
{
struct tracectx *t = &tracer;
u32 first = 0;
int length;
if (!t->etb_regs) {
printk(KERN_INFO "No tracing hardware found\n");
return;
}
if (trace_isrunning(t))
trace_stop(t);
etb_unlock(t);
length = etb_getdatalen(t);
if (length == t->etb_bufsz)
first = etb_readl(t, ETBR_WRITEADDR);
etb_writel(t, first, ETBR_READADDR);
printk(KERN_INFO "Trace buffer contents length: %d\n", length);
printk(KERN_INFO "--- ETB buffer begin ---\n");
for (; length; length--)
printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
printk(KERN_INFO "\n--- ETB buffer end ---\n");
etb_lock(t);
}
static void sysrq_etm_dump(int key)
{
if (!mutex_trylock(&tracer.mutex)) {
printk(KERN_INFO "Tracing hardware busy\n");
return;
}
dev_dbg(tracer.dev, "Dumping ETB buffer\n");
etm_dump();
mutex_unlock(&tracer.mutex);
}
static struct sysrq_key_op sysrq_etm_op = {
.handler = sysrq_etm_dump,
.help_msg = "etm-buffer-dump(v)",
.action_msg = "etm",
};
static int etb_open(struct inode *inode, struct file *file)
{
if (!tracer.etb_regs)
return -ENODEV;
file->private_data = &tracer;
return nonseekable_open(inode, file);
}
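/*
 * read() handler for the "tracebuf" misc device: copy captured trace
 * words from the ETB to user space, honouring the file position. Returns
 * 0 while tracing is still running.
 */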
static ssize_t etb_read(struct file *file, char __user *data,
size_t len, loff_t *ppos)
{
int total, i;
long length;
struct tracectx *t = file->private_data;
u32 first = 0;
u32 *buf;
int wpos;
int skip;
long wlength;
loff_t pos = *ppos;
mutex_lock(&t->mutex);
if (trace_isrunning(t)) {
length = 0;
goto out;
}
etb_unlock(t);
total = etb_getdatalen(t);
if (total == 0 && t->dump_initial_etb)
total = t->etb_bufsz;
if (total == t->etb_bufsz)
first = etb_readl(t, ETBR_WRITEADDR);
if (pos > total * 4) {
skip = 0;
wpos = total;
} else {
skip = (int)pos % 4;
wpos = (int)pos / 4;
}
total -= wpos;
first = (first + wpos) % t->etb_bufsz;
etb_writel(t, first, ETBR_READADDR);
wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
length = min(total * 4 - skip, (int)len);
buf = vmalloc(wlength * 4);
dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
length, pos, wlength, first);
dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
for (i = 0; i < wlength; i++)
buf[i] = etb_readl(t, ETBR_READMEM);
etb_lock(t);
length -= copy_to_user(data, (u8 *)buf + skip, length);
vfree(buf);
*ppos = pos + length;
out:
mutex_unlock(&t->mutex);
return length;
}
static int etb_release(struct inode *inode, struct file *file)
{
return 0;
}
static const struct file_operations etb_fops = {
.owner = THIS_MODULE,
.read = etb_read,
.open = etb_open,
.release = etb_release,
.llseek = no_llseek,
};
static struct miscdevice etb_miscdev = {
.name = "tracebuf",
.minor = 0,
.fops = &etb_fops,
};
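/*
 * AMBA probe for the ETB: map its registers, read the buffer depth,
 * register the "tracebuf" misc device and, if available, enable the
 * emu_src_ck clock.
 */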
static int etb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
ret = amba_request_regions(dev, NULL);
if (ret)
goto out;
mutex_lock(&t->mutex);
t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
if (!t->etb_regs) {
ret = -ENOMEM;
goto out_release;
}
t->dev = &dev->dev;
t->dump_initial_etb = true;
amba_set_drvdata(dev, t);
etb_unlock(t);
t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
etb_writel(t, 0, ETBR_CTRL);
etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
etb_lock(t);
mutex_unlock(&t->mutex);
etb_miscdev.parent = &dev->dev;
ret = misc_register(&etb_miscdev);
if (ret)
goto out_unmap;
t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
if (IS_ERR(t->emu_clk))
dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
else
clk_enable(t->emu_clk);
dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
out:
return ret;
out_unmap:
mutex_lock(&t->mutex);
iounmap(t->etb_regs);
t->etb_regs = NULL;
out_release:
mutex_unlock(&t->mutex);
amba_release_regions(dev);
return ret;
}
static int etb_remove(struct amba_device *dev)
{
struct tracectx *t = amba_get_drvdata(dev);
iounmap(t->etb_regs);
t->etb_regs = NULL;
if (!IS_ERR(t->emu_clk)) {
clk_disable(t->emu_clk);
clk_put(t->emu_clk);
}
amba_release_regions(dev);
return 0;
}
static struct amba_id etb_ids[] = {
{
.id = 0x0003b907,
.mask = 0x0007ffff,
},
{ 0, 0 },
};
static struct amba_driver etb_driver = {
.drv = {
.name = "etb",
.owner = THIS_MODULE,
},
.probe = etb_probe,
.remove = etb_remove,
.id_table = etb_ids,
};
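/*
 * sysfs interface on the ETM device: trace_running starts/stops tracing;
 * the remaining attributes report status and tune the trace
 * configuration.
 */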
static ssize_t trace_running_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%x\n", trace_isrunning(&tracer));
}
static ssize_t trace_running_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int value;
int ret;
if (sscanf(buf, "%u", &value) != 1)
return -EINVAL;
mutex_lock(&tracer.mutex);
if (!tracer.etb_regs)
ret = -ENODEV;
else
ret = value ? trace_start(&tracer) : trace_stop(&tracer);
mutex_unlock(&tracer.mutex);
return ret ? : n;
}
static struct kobj_attribute trace_running_attr =
__ATTR(trace_running, 0644, trace_running_show, trace_running_store);
static ssize_t trace_info_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
int datalen;
int id;
int ret;
mutex_lock(&tracer.mutex);
if (tracer.etb_regs) {
etb_unlock(&tracer);
datalen = etb_getdatalen(&tracer);
etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
etb_ra = etb_readl(&tracer, ETBR_READADDR);
etb_st = etb_readl(&tracer, ETBR_STATUS);
etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
etb_lock(&tracer);
} else {
etb_wa = etb_ra = etb_st = etb_fc = ~0;
datalen = -1;
}
ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
"ETBR_WRITEADDR:\t%08x\n"
"ETBR_READADDR:\t%08x\n"
"ETBR_STATUS:\t%08x\n"
"ETBR_FORMATTERCTRL:\t%08x\n",
datalen,
tracer.ncmppairs,
etb_wa,
etb_ra,
etb_st,
etb_fc
);
for (id = 0; id < tracer.etm_regs_count; id++) {
etm_unlock(&tracer, id);
etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
etm_st = etm_readl(&tracer, id, ETMR_STATUS);
etm_lock(&tracer, id);
ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
"ETMR_STATUS:\t%08x\n",
etm_ctrl,
etm_st
);
}
mutex_unlock(&tracer.mutex);
return ret;
}
static struct kobj_attribute trace_info_attr =
__ATTR(trace_info, 0444, trace_info_show, NULL);
static ssize_t trace_mode_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d %d\n",
!!(tracer.flags & TRACER_CYCLE_ACC),
tracer.etm_portsz);
}
static ssize_t trace_mode_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int cycacc, portsz;
if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2)
return -EINVAL;
mutex_lock(&tracer.mutex);
if (cycacc)
tracer.flags |= TRACER_CYCLE_ACC;
else
tracer.flags &= ~TRACER_CYCLE_ACC;
tracer.etm_portsz = portsz & 0x0f;
mutex_unlock(&tracer.mutex);
return n;
}
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
static ssize_t trace_contextid_size_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
}
static ssize_t trace_contextid_size_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int contextid_size;
if (sscanf(buf, "%u", &contextid_size) != 1)
return -EINVAL;
if (contextid_size == 3 || contextid_size > 4)
return -EINVAL;
mutex_lock(&tracer.mutex);
tracer.etm_contextid_size = fls(contextid_size);
mutex_unlock(&tracer.mutex);
return n;
}
static struct kobj_attribute trace_contextid_size_attr =
__ATTR(trace_contextid_size, 0644,
trace_contextid_size_show, trace_contextid_size_store);
static ssize_t trace_branch_output_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
}
static ssize_t trace_branch_output_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int branch_output;
if (sscanf(buf, "%u", &branch_output) != 1)
return -EINVAL;
mutex_lock(&tracer.mutex);
if (branch_output) {
tracer.flags |= TRACER_BRANCHOUTPUT;
tracer.flags &= ~TRACER_RETURN_STACK;
} else {
tracer.flags &= ~TRACER_BRANCHOUTPUT;
}
mutex_unlock(&tracer.mutex);
return n;
}
static struct kobj_attribute trace_branch_output_attr =
__ATTR(trace_branch_output, 0644,
trace_branch_output_show, trace_branch_output_store);
static ssize_t trace_return_stack_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
}
static ssize_t trace_return_stack_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int return_stack;
if (sscanf(buf, "%u", &return_stack) != 1)
return -EINVAL;
mutex_lock(&tracer.mutex);
if (return_stack) {
tracer.flags |= TRACER_RETURN_STACK;
tracer.flags &= ~TRACER_BRANCHOUTPUT;
} else {
tracer.flags &= ~TRACER_RETURN_STACK;
}
mutex_unlock(&tracer.mutex);
return n;
}
static struct kobj_attribute trace_return_stack_attr =
__ATTR(trace_return_stack, 0644,
trace_return_stack_show, trace_return_stack_store);
static ssize_t trace_timestamp_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
}
static ssize_t trace_timestamp_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned int timestamp;
if (sscanf(buf, "%u", &timestamp) != 1)
return -EINVAL;
mutex_lock(&tracer.mutex);
if (timestamp)
tracer.flags |= TRACER_TIMESTAMP;
else
tracer.flags &= ~TRACER_TIMESTAMP;
mutex_unlock(&tracer.mutex);
return n;
}
static struct kobj_attribute trace_timestamp_attr =
__ATTR(trace_timestamp, 0644,
trace_timestamp_show, trace_timestamp_store);
static ssize_t trace_range_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%08lx %08lx\n",
tracer.range_start, tracer.range_end);
}
static ssize_t trace_range_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long range_start, range_end;
if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
return -EINVAL;
mutex_lock(&tracer.mutex);
tracer.range_start = range_start;
tracer.range_end = range_end;
mutex_unlock(&tracer.mutex);
return n;
}
static struct kobj_attribute trace_range_attr =
__ATTR(trace_range, 0644, trace_range_show, trace_range_store);
static ssize_t trace_data_range_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
unsigned long range_start;
u64 range_end;
mutex_lock(&tracer.mutex);
range_start = tracer.data_range_start;
range_end = tracer.data_range_end;
if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
range_end = 0x100000000ULL;
mutex_unlock(&tracer.mutex);
return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
}
static ssize_t trace_data_range_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long range_start;
u64 range_end;
if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
return -EINVAL;
mutex_lock(&tracer.mutex);
tracer.data_range_start = range_start;
tracer.data_range_end = (unsigned long)range_end;
if (range_end)
tracer.flags |= TRACER_TRACE_DATA;
else
tracer.flags &= ~TRACER_TRACE_DATA;
mutex_unlock(&tracer.mutex);
return n;
}
static struct kobj_attribute trace_data_range_attr =
__ATTR(trace_data_range, 0644,
trace_data_range_show, trace_data_range_store);
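/*
 * AMBA probe for an ETM: map its registers, record its capabilities,
 * apply the default trace configuration and create the sysfs control
 * files on the device.
 */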
static int etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
void __iomem **new_regs;
int new_count;
u32 etmccr;
u32 etmidr;
u32 etmccer = 0;
u8 etm_version = 0;
mutex_lock(&t->mutex);
new_count = t->etm_regs_count + 1;
new_regs = krealloc(t->etm_regs,
sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
if (!new_regs) {
dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
ret = -ENOMEM;
goto out;
}
t->etm_regs = new_regs;
ret = amba_request_regions(dev, NULL);
if (ret)
goto out;
t->etm_regs[t->etm_regs_count] =
ioremap_nocache(dev->res.start, resource_size(&dev->res));
if (!t->etm_regs[t->etm_regs_count]) {
ret = -ENOMEM;
goto out_release;
}
amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
t->etm_portsz = 1;
t->etm_contextid_size = 3;
etm_unlock(t, t->etm_regs_count);
(void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
(void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
t->ncmppairs = etmccr & 0xf;
if (etmccr & ETMCCR_ETMIDR_PRESENT) {
etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
etm_version = ETMIDR_VERSION(etmidr);
if (etm_version >= ETMIDR_VERSION_3_1)
etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
}
etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
etm_lock(t, t->etm_regs_count);
ret = sysfs_create_file(&dev->dev.kobj,
&trace_running_attr.attr);
if (ret)
goto out_unmap;
ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
if (ret)
dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");
ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
if (ret)
dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
ret = sysfs_create_file(&dev->dev.kobj,
&trace_contextid_size_attr.attr);
if (ret)
dev_dbg(&dev->dev,
"Failed to create trace_contextid_size in sysfs\n");
ret = sysfs_create_file(&dev->dev.kobj,
&trace_branch_output_attr.attr);
if (ret)
dev_dbg(&dev->dev,
"Failed to create trace_branch_output in sysfs\n");
if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
ret = sysfs_create_file(&dev->dev.kobj,
&trace_return_stack_attr.attr);
if (ret)
dev_dbg(&dev->dev,
"Failed to create trace_return_stack in sysfs\n");
}
if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
ret = sysfs_create_file(&dev->dev.kobj,
&trace_timestamp_attr.attr);
if (ret)
dev_dbg(&dev->dev,
"Failed to create trace_timestamp in sysfs\n");
}
ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
if (ret)
dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
if (etm_version < ETMIDR_VERSION_PFT_1_0) {
ret = sysfs_create_file(&dev->dev.kobj,
&trace_data_range_attr.attr);
if (ret)
dev_dbg(&dev->dev,
"Failed to create trace_data_range in sysfs\n");
} else {
tracer.flags &= ~TRACER_TRACE_DATA;
}
dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
if (new_count > 1)
t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
t->etm_regs_count = new_count;
out:
mutex_unlock(&t->mutex);
return ret;
out_unmap:
iounmap(t->etm_regs[t->etm_regs_count]);
out_release:
amba_release_regions(dev);
mutex_unlock(&t->mutex);
return ret;
}
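/* Undo etm_probe(): remove the sysfs files, unmap and drop this ETM. */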
static int etm_remove(struct amba_device *dev)
{
int i;
struct tracectx *t = &tracer;
void __iomem *etm_regs = amba_get_drvdata(dev);
sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
mutex_lock(&t->mutex);
for (i = 0; i < t->etm_regs_count; i++)
if (t->etm_regs[i] == etm_regs)
break;
for (; i < t->etm_regs_count - 1; i++)
t->etm_regs[i] = t->etm_regs[i + 1];
t->etm_regs_count--;
if (!t->etm_regs_count) {
kfree(t->etm_regs);
t->etm_regs = NULL;
}
mutex_unlock(&t->mutex);
iounmap(etm_regs);
amba_release_regions(dev);
return 0;
}
static struct amba_id etm_ids[] = {
{
.id = 0x0003b921,
.mask = 0x0007ffff,
},
{
.id = 0x0003b950,
.mask = 0x0007ffff,
},
{ 0, 0 },
};
static struct amba_driver etm_driver = {
.drv = {
.name = "etm",
.owner = THIS_MODULE,
},
.probe = etm_probe,
.remove = etm_remove,
.id_table = etm_ids,
};
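/*
 * Module init: register the ETB and ETM AMBA drivers and hook up the 'v'
 * sysrq key for dumping the trace buffer.
 */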
static int __init etm_init(void)
{
int retval;
mutex_init(&tracer.mutex);
retval = amba_driver_register(&etb_driver);
if (retval) {
printk(KERN_ERR "Failed to register etb\n");
return retval;
}
retval = amba_driver_register(&etm_driver);
if (retval) {
amba_driver_unregister(&etb_driver);
printk(KERN_ERR "Failed to probe etm\n");
return retval;
}
(void)register_sysrq_key('v', &sysrq_etm_op);
return 0;
}
device_initcall(etm_init);