+ struct net_device *dev_for_napi = NULL;
+ union cvmx_pow_wq_int_thrx int_thr;
+ union cvmx_pow_wq_int_pc int_pc;
+
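+	/* Find any allocated net_device; NAPI only needs one to attach to. */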
+ for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
+ if (cvm_oct_device[i]) {
+ dev_for_napi = cvm_oct_device[i];
+ break;
+ }
+ }
+
+	if (!dev_for_napi)
+ panic("No net_devices were allocated.");
+
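+	/* Honor max_rx_cpus when it is a usable limit, else use all online CPUs. */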
+ if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
+ atomic_set(&core_state.available_cores, max_rx_cpus);
+ else
+ atomic_set(&core_state.available_cores, num_online_cpus());
+ core_state.baseline_cores = atomic_read(&core_state.available_cores);
+
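+	/* No CPUs are running NAPI yet; register a NAPI instance per possible CPU. */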
+ core_state.cpu_state = CPU_MASK_NONE;
+ for_each_possible_cpu(i) {
+ netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
+ cvm_oct_napi_poll, rx_napi_weight);
+ napi_enable(&cvm_oct_napi[i].napi);
+ }
+	/* Register an IRQ handler to receive POW interrupts */
+ i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
+ cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
+
+ if (i)
+ panic("Could not acquire Ethernet IRQ %d\n",
+ OCTEON_IRQ_WORKQ0 + pow_receive_group);
+
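+	/* Leave the IRQ disabled until NAPI is scheduled for the first time. */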
+ disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
+
+ int_thr.u64 = 0;
+ int_thr.s.tc_en = 1;
+ int_thr.s.tc_thr = 1;
+ /* Enable POW interrupt when our port has at least one packet */
+ cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
+
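+	/* Enable the POW timer interrupt. It counts while packets are available. */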
+ int_pc.u64 = 0;
+ int_pc.s.pc_thr = 5;
+ cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+
+	/* Schedule NAPI now. This will indirectly enable interrupts. */
+ cvm_oct_enable_one_cpu();