bool "FIQ Mode Serial Debugger"
select FIQ
select FIQ_GLUE
- select KERNEL_DEBUGGER_CORE
default n
help
The FIQ serial debugger can accept commands even when the
kernel is unresponsive due to being stuck with interrupts
- disabled. Depends on the kernel debugger core in drivers/misc.
+ disabled.
config FIQ_DEBUGGER_NO_SLEEP
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <linux/kernel_debugger.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/delay.h>
struct tty_struct *tty;
int tty_open_count;
struct fiq_debugger_ringbuf *tty_rbuf;
+ bool syslog_dumping;
#endif
unsigned int last_irqs[NR_IRQS];
tail = user_backtrace(state, tail);
}
-static bool debug_help(struct fiq_debugger_state *state)
+static void do_ps(struct fiq_debugger_state *state)
+{
+ struct task_struct *g;
+ struct task_struct *p;
+ unsigned task_state;
+ static const char stat_nam[] = "RSDTtZX";
+
+ debug_printf(state, "pid ppid prio task pc\n");
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ task_state = p->state ? __ffs(p->state) + 1 : 0;
+ debug_printf(state,
+ "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+ debug_printf(state, "%-13.13s %c", p->comm,
+ task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+ if (task_state == TASK_RUNNING)
+ debug_printf(state, " running\n");
+ else
+ debug_printf(state, " %08lx\n", thread_saved_pc(p));
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = true;
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *bug, int count);
+static void begin_syslog_dump(struct fiq_debugger_state *state)
+{
+ do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void end_syslog_dump(struct fiq_debugger_state *state)
+{
+ char buf[128];
+ int ret;
+ int idx = 0;
+
+ while (1) {
+ ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
+ if (ret <= 0)
+ break;
+ buf[ret] = 0;
+ debug_printf(state, "%s", buf);
+ idx += ret;
+ }
+}
+#endif
+
+static void do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+ begin_syslog_dump(state);
+ handle_sysrq(rq);
+ end_syslog_dump(state);
+}
+
+/* This function CANNOT be called in FIQ context */
+static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+ if (!strcmp(cmd, "ps"))
+ do_ps(state);
+ if (!strcmp(cmd, "sysrq"))
+ do_sysrq(state, 'h');
+ if (!strncmp(cmd, "sysrq ", 6))
+ do_sysrq(state, cmd[6]);
+}
+
+static void debug_help(struct fiq_debugger_state *state)
{
debug_printf(state, "FIQ Debugger commands:\n"
" pc PC status\n"
" console Switch terminal to console\n"
" cpu Current CPU\n"
" cpu <number> Switch to CPU<number>\n");
- if (!state->debug_busy) {
- strcpy(state->debug_cmd, "help");
- state->debug_busy = 1;
- return true;
- }
-
- return false;
+ debug_printf(state, " ps Process list\n"
+ " sysrq sysrq options\n"
+ " sysrq <param> Execute sysrq with <param>\n");
}
static void take_affinity(void *info)
state->current_cpu = cpu;
}
-static bool debug_exec(struct fiq_debugger_state *state,
+static bool debug_fiq_exec(struct fiq_debugger_state *state,
const char *cmd, unsigned *regs, void *svc_sp)
{
bool signal_helper = false;
if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
- signal_helper |= debug_help(state);
+ debug_help(state);
} else if (!strcmp(cmd, "pc")) {
debug_printf(state, " pc %08x cpsr %08x mode %s\n",
regs[15], regs[16], mode_name(regs[16]));
}
#endif
if (state->debug_busy) {
- struct kdbg_ctxt ctxt;
-
- ctxt.printf = debug_printf_nfiq;
- ctxt.cookie = state;
- kernel_debugger(&ctxt, state->debug_cmd);
+ debug_irq_exec(state, state->debug_cmd);
debug_prompt(state);
-
state->debug_busy = 0;
}
}
state->debug_buf[state->debug_count] = 0;
state->debug_count = 0;
signal_helper |=
- debug_exec(state, state->debug_buf,
- regs, svc_sp);
+ debug_fiq_exec(state, state->debug_buf,
+ regs, svc_sp);
} else {
debug_prompt(state);
}
state = container_of(co, struct fiq_debugger_state, console);
- if (!state->console_enable)
+ if (!state->console_enable && !state->syslog_dumping)
return;
debug_uart_enable(state);
local_irq_enable();
}
}
- idle_notifier_call_chain(IDLE_END);
tick_nohz_restart_sched_tick();
+ idle_notifier_call_chain(IDLE_END);
preempt_enable_no_resched();
schedule();
preempt_disable();
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
+#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;
-/* Go to max speed when CPU load at or above this value. */
-#define DEFAULT_GO_MAXSPEED_LOAD 95
-static unsigned long go_maxspeed_load;
+/* Hi speed to bump to from lo speed when load burst (default max) */
+static u64 hispeed_freq;
+
+/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 95
+static unsigned long go_hispeed_load;
/*
* The minimum amount of time to spend at a frequency before we can ramp down.
*/
-#define DEFAULT_MIN_SAMPLE_TIME 20000;
+#define DEFAULT_MIN_SAMPLE_TIME 20 * USEC_PER_MSEC
static unsigned long min_sample_time;
/*
* The sample rate of the timer used to increase frequency
*/
-#define DEFAULT_TIMER_RATE 10000;
+#define DEFAULT_TIMER_RATE 20 * USEC_PER_MSEC
static unsigned long timer_rate;
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
if (load_since_change > cpu_load)
cpu_load = load_since_change;
- if (cpu_load >= go_maxspeed_load)
- new_freq = pcpu->policy->max;
- else
- new_freq = pcpu->policy->max * cpu_load / 100;
+ if (cpu_load >= go_hispeed_load) {
+ if (pcpu->policy->cur == pcpu->policy->min)
+ new_freq = hispeed_freq;
+ else
+ new_freq = pcpu->policy->max * cpu_load / 100;
+ } else {
+ new_freq = pcpu->policy->cur * cpu_load / 100;
+ }
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
new_freq, CPUFREQ_RELATION_H,
}
}
-static ssize_t show_go_maxspeed_load(struct kobject *kobj,
+static ssize_t show_hispeed_freq(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%llu\n", hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct kobject *kobj,
+ struct attribute *attr, const char *buf,
+ size_t count)
+{
+ int ret;
+ u64 val;
+
+ ret = strict_strtoull(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ hispeed_freq = val;
+ return count;
+}
+
+static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
+ show_hispeed_freq, store_hispeed_freq);
+
+
+static ssize_t show_go_hispeed_load(struct kobject *kobj,
struct attribute *attr, char *buf)
{
- return sprintf(buf, "%lu\n", go_maxspeed_load);
+ return sprintf(buf, "%lu\n", go_hispeed_load);
}
-static ssize_t store_go_maxspeed_load(struct kobject *kobj,
+static ssize_t store_go_hispeed_load(struct kobject *kobj,
struct attribute *attr, const char *buf, size_t count)
{
int ret;
ret = strict_strtoul(buf, 0, &val);
if (ret < 0)
return ret;
- go_maxspeed_load = val;
+ go_hispeed_load = val;
return count;
}
-static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
- show_go_maxspeed_load, store_go_maxspeed_load);
+static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
+ show_go_hispeed_load, store_go_hispeed_load);
static ssize_t show_min_sample_time(struct kobject *kobj,
struct attribute *attr, char *buf)
show_timer_rate, store_timer_rate);
static struct attribute *interactive_attributes[] = {
- &go_maxspeed_load_attr.attr,
+ &hispeed_freq_attr.attr,
+ &go_hispeed_load_attr.attr,
&min_sample_time_attr.attr,
&timer_rate_attr.attr,
NULL,
smp_wmb();
}
+ if (!hispeed_freq)
+ hispeed_freq = policy->max;
+
/*
* Do not register the idle hook and create sysfs
* entries if we have already done so.
struct cpufreq_interactive_cpuinfo *pcpu;
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
+ go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
timer_rate = DEFAULT_TIMER_RATE;
if (key_state->debounce & DEBOUNCE_UNSTABLE) {
debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
enable_irq(gpio_to_irq(key_entry->gpio));
- pr_info("gpio_keys_scan_keys: key %x-%x, %d "
- "(%d) continue debounce\n",
- ds->info->type, key_entry->code,
- i, key_entry->gpio);
+ if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+ pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+ "(%d) continue debounce\n",
+ ds->info->type, key_entry->code,
+ i, key_entry->gpio);
}
npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
driver (SCSI/ATA) which supports enclosures
or a SCSI enclosure device (SES) to use these services.
-config KERNEL_DEBUGGER_CORE
- bool "Kernel Debugger Core"
- default n
- ---help---
- Generic kernel debugging command processor used by low level
- (interrupt context) platform-specific debuggers.
-
config SGI_XP
tristate "Support communication between SGI SSIs"
depends on NET
obj-$(CONFIG_ANDROID_PMEM) += pmem.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
-obj-$(CONFIG_KERNEL_DEBUGGER_CORE) += kernel_debugger.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
+++ /dev/null
-/* drivers/android/kernel_debugger.c
- *
- * Guts of the kernel debugger.
- * Needs something to actually push commands to it.
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/ctype.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/sysrq.h>
-#include <linux/kernel_debugger.h>
-
-#define dprintf(fmt...) (ctxt->printf(ctxt->cookie, fmt))
-
-static void do_ps(struct kdbg_ctxt *ctxt)
-{
- struct task_struct *g, *p;
- unsigned state;
- static const char stat_nam[] = "RSDTtZX";
-
- dprintf("pid ppid prio task pc\n");
- read_lock(&tasklist_lock);
- do_each_thread(g, p) {
- state = p->state ? __ffs(p->state) + 1 : 0;
- dprintf("%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
- dprintf("%-13.13s %c", p->comm,
- state >= sizeof(stat_nam) ? '?' : stat_nam[state]);
- if (state == TASK_RUNNING)
- dprintf(" running\n");
- else
- dprintf(" %08lx\n", thread_saved_pc(p));
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
-}
-
-int log_buf_copy(char *dest, int idx, int len);
-extern int do_syslog(int type, char __user *bug, int count);
-static void do_sysrq(struct kdbg_ctxt *ctxt, char rq)
-{
- char buf[128];
- int ret;
- int idx = 0;
- do_syslog(5 /* clear */, NULL, 0);
- handle_sysrq(rq);
- while (1) {
- ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
- if (ret <= 0)
- break;
- buf[ret] = 0;
- dprintf("%s", buf);
- idx += ret;
- }
-}
-
-static void do_help(struct kdbg_ctxt *ctxt)
-{
- dprintf("Kernel Debugger commands:\n");
- dprintf(" ps Process list\n");
- dprintf(" sysrq sysrq options\n");
- dprintf(" sysrq <param> Execute sysrq with <param>\n");
-}
-
-int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd)
-{
- if (!strcmp(cmd, "ps"))
- do_ps(ctxt);
- if (!strcmp(cmd, "sysrq"))
- do_sysrq(ctxt, 'h');
- if (!strncmp(cmd, "sysrq ", 6))
- do_sysrq(ctxt, cmd[6]);
- if (!strcmp(cmd, "help"))
- do_help(ctxt);
-
- return 0;
-}
-
sdioh_start(NULL, 0);
ret = dhd_dev_reset(dev, FALSE);
sdioh_start(NULL, 1);
- dhd_dev_init_ioctl(dev);
+ if (!ret)
+ dhd_dev_init_ioctl(dev);
g_wifi_on = 1;
}
dhd_net_if_unlock(dev);
dhd_net_if_lock(dev);
if (g_wifi_on) {
- dhd_dev_reset(dev, 1);
+ ret = dhd_dev_reset(dev, TRUE);
sdioh_stop(NULL);
dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
g_wifi_on = 0;
}
params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL);
if (!params) {
- err = -ENOMEM;
- goto done;
+ return -ENOMEM;
}
if (request != NULL)
WL_ERR(("error (%d)\n", err));
}
}
- kfree(params);
done:
+ kfree(params);
return err;
}
params->sync_id = htod16(0x1234);
if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
WL_ERR(("ioctl buffer length not sufficient\n"));
+ kfree(params);
err = -ENOMEM;
goto exit;
}
#if defined(BCMLXSDMMC)
sdioh_start(NULL, 1);
#endif
-
- dhd_dev_init_ioctl(dev);
+ if (!ret)
+ dhd_dev_init_ioctl(dev);
g_onoff = G_WLAN_SET_ON;
}
g_iscan->iscan_state = ISCAN_STATE_IDLE;
#endif
- dhd_dev_reset(dev, 1);
+ ret = dhd_dev_reset(dev, 1);
#if defined(WL_IW_USE_ISCAN)
#if !defined(CSCAN)
sdioh_stop(NULL);
#endif
-
- net_os_set_dtim_skip(dev, 0);
-
dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
wl_iw_send_priv_event(dev, "STOP");
case WLC_E_ROAM:
if (status == WLC_E_STATUS_SUCCESS) {
WL_ASSOC((" WLC_E_ROAM : success \n"));
- return;
+ goto wl_iw_event_end;
}
break;
case WLC_E_SCAN_COMPLETE:
#if defined(WL_IW_USE_ISCAN)
+ if (!g_iscan) {
+ WL_ERROR(("Event WLC_E_SCAN_COMPLETE on g_iscan NULL!"));
+ goto wl_iw_event_end;
+ }
+
if ((g_iscan) && (g_iscan->tsk_ctl.thr_pid >= 0) &&
(g_iscan->iscan_state != ISCAN_STATE_IDLE))
{
* system's wall clock; restore it on resume().
*/
-static time_t oldtime;
-static struct timespec oldts;
+static struct timespec old_rtc, old_system, old_delta;
+
static int rtc_suspend(struct device *dev, pm_message_t mesg)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
-
+ struct timespec delta, delta_delta;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
+	/* snapshot the current RTC and system time at suspend */
rtc_read_time(rtc, &tm);
- ktime_get_ts(&oldts);
- rtc_tm_to_time(&tm, &oldtime);
+ getnstimeofday(&old_system);
+ rtc_tm_to_time(&tm, &old_rtc.tv_sec);
+
+
+ /*
+ * To avoid drift caused by repeated suspend/resumes,
+ * which each can add ~1 second drift error,
+ * try to compensate so the difference in system time
+ * and rtc time stays close to constant.
+ */
+ delta = timespec_sub(old_system, old_rtc);
+ delta_delta = timespec_sub(delta, old_delta);
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
+		/*
+		 * if delta_delta is too large, assume a time correction
+		 * has occurred and set old_delta to the current delta.
+		 */
+ old_delta = delta;
+ } else {
+ /* Otherwise try to adjust old_system to compensate */
+ old_system = timespec_sub(old_system, delta_delta);
+ }
return 0;
}
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
- time_t newtime;
- struct timespec time;
- struct timespec newts;
+ struct timespec new_system, new_rtc;
+ struct timespec sleep_time;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
- ktime_get_ts(&newts);
+ /* snapshot the current rtc and system time at resume */
+ getnstimeofday(&new_system);
rtc_read_time(rtc, &tm);
if (rtc_valid_tm(&tm) != 0) {
pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev));
return 0;
}
- rtc_tm_to_time(&tm, &newtime);
- if (newtime <= oldtime) {
- if (newtime < oldtime)
- pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ rtc_tm_to_time(&tm, &new_rtc.tv_sec);
+ new_rtc.tv_nsec = 0;
+
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
- /* calculate the RTC time delta */
- set_normalized_timespec(&time, newtime - oldtime, 0);
-
- /* subtract kernel time between rtc_suspend to rtc_resume */
- time = timespec_sub(time, timespec_sub(newts, oldts));
- timekeeping_inject_sleeptime(&time);
+	/* calculate the RTC time delta (sleep time) */
+ sleep_time = timespec_sub(new_rtc, old_rtc);
+
+	/*
+	 * Since these RTC suspend/resume handlers are not called
+	 * at the very end of suspend or the start of resume,
+	 * some run-time may pass on either side of the sleep time,
+	 * so subtract the kernel run-time between rtc_suspend and
+	 * rtc_resume to keep things accurate.
+	 */
+ sleep_time = timespec_sub(sleep_time,
+ timespec_sub(new_system, old_system));
+
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
sscanf(buff, "%d", &enabled);
if (enabled && !dev->enabled) {
+ cdev->next_string_id = 0;
/* update values in composite driver's copy of device descriptor */
cdev->desc.idVendor = device_desc.idVendor;
cdev->desc.idProduct = device_desc.idProduct;
/* GPIOEDF_USE_IRQ = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
GPIOEDF_PRINT_KEYS = 1U << 8,
GPIOEDF_PRINT_KEY_DEBOUNCE = 1U << 9,
+ GPIOEDF_PRINT_KEY_UNSTABLE = 1U << 10,
};
struct gpio_event_direct_entry {
+++ /dev/null
-/*
- * include/linux/kernel_debugger.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _LINUX_KERNEL_DEBUGGER_H_
-#define _LINUX_KERNEL_DEBUGGER_H_
-
-struct kdbg_ctxt {
- int (*printf)(void *cookie, const char *fmt, ...);
- void *cookie;
-};
-
-/* kernel_debugger() is called from IRQ context and should
- * use the kdbg_ctxt.printf to write output (do NOT call
- * printk, do operations not safe from IRQ context, etc).
- *
- * kdbg_ctxt.printf will return -1 if there is not enough
- * buffer space or if you are being aborted. In this case
- * you must return as soon as possible.
- *
- * Return non-zero if more data is available -- if buffer
- * space ran and you had to stop, but could print more,
- * for example.
- *
- * Additional calls where cmd is "more" will be made if
- * the additional data is desired.
- */
-int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd);
-
-#endif
*/
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
+ if (!timespec_valid(delta)) {
+ printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
+ "sleep delta value!\n");
+ return;
+ }
+
xtime = timespec_add(xtime, *delta);
wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
total_sleep_time = timespec_add(total_sleep_time, *delta);
hci_conn_check_pending(hdev);
}
+static inline bool is_sco_active(struct hci_dev *hdev)
+{
+ if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
+ (hci_conn_hash_lookup_state(hdev, ESCO_LINK,
+ BT_CONNECTED)))
+ return true;
+ return false;
+}
+
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
bacpy(&cp.bdaddr, &ev->bdaddr);
- if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+ if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
+ || is_sco_active(hdev)))
cp.role = 0x00; /* Become master */
else
cp.role = 0x01; /* Remain slave */