/*
 *
 * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */
20 #include <mali_kbase.h>
21 #include <mali_kbase_debug.h>
23 #if defined(CONFIG_MALI_MIPE_ENABLED)
24 #include <mali_kbase_tlstream.h>
27 static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
29 struct base_jd_udata data;
31 lockdep_assert_held(&kctx->jctx.lock);
33 KBASE_DEBUG_ASSERT(kctx != NULL);
34 KBASE_DEBUG_ASSERT(katom != NULL);
35 KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);
39 KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));
41 #if defined(CONFIG_MALI_MIPE_ENABLED)
42 kbase_tlstream_tl_nret_atom_ctx(katom, kctx);
43 kbase_tlstream_tl_del_atom(katom);
46 katom->status = KBASE_JD_ATOM_STATE_UNUSED;
48 wake_up(&katom->completed);
53 int kbase_event_pending(struct kbase_context *ctx)
55 KBASE_DEBUG_ASSERT(ctx);
57 return (atomic_read(&ctx->event_count) != 0) ||
58 (atomic_read(&ctx->event_closed) != 0);
61 KBASE_EXPORT_TEST_API(kbase_event_pending);
63 int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
65 struct kbase_jd_atom *atom;
67 KBASE_DEBUG_ASSERT(ctx);
69 mutex_lock(&ctx->event_mutex);
71 if (list_empty(&ctx->event_list)) {
72 if (!atomic_read(&ctx->event_closed)) {
73 mutex_unlock(&ctx->event_mutex);
77 /* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
78 mutex_unlock(&ctx->event_mutex);
79 uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
80 memset(&uevent->udata, 0, sizeof(uevent->udata));
81 dev_dbg(ctx->kbdev->dev,
82 "event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
83 BASE_JD_EVENT_DRV_TERMINATED);
87 /* normal event processing */
88 atomic_dec(&ctx->event_count);
89 atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
90 list_del(ctx->event_list.next);
92 mutex_unlock(&ctx->event_mutex);
94 dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
95 uevent->event_code = atom->event_code;
96 uevent->atom_number = (atom - ctx->jctx.atoms);
98 if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
99 kbase_jd_free_external_resources(atom);
101 mutex_lock(&ctx->jctx.lock);
102 uevent->udata = kbase_event_process(ctx, atom);
103 mutex_unlock(&ctx->jctx.lock);
108 KBASE_EXPORT_TEST_API(kbase_event_dequeue);
111 * kbase_event_process_noreport_worker - Worker for processing atoms that do not
112 * return an event but do have external
114 * @data: Work structure
116 static void kbase_event_process_noreport_worker(struct work_struct *data)
118 struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
120 struct kbase_context *kctx = katom->kctx;
122 if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
123 kbase_jd_free_external_resources(katom);
125 mutex_lock(&kctx->jctx.lock);
126 kbase_event_process(kctx, katom);
127 mutex_unlock(&kctx->jctx.lock);
131 * kbase_event_process_noreport - Process atoms that do not return an event
132 * @kctx: Context pointer
133 * @katom: Atom to be processed
135 * Atoms that do not have external resources will be processed immediately.
136 * Atoms that do have external resources will be processed on a workqueue, in
137 * order to avoid locking issues.
139 static void kbase_event_process_noreport(struct kbase_context *kctx,
140 struct kbase_jd_atom *katom)
142 if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
143 INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
144 queue_work(kctx->event_workq, &katom->work);
146 kbase_event_process(kctx, katom);
150 void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
152 if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
153 if (atom->event_code == BASE_JD_EVENT_DONE) {
154 /* Don't report the event */
155 kbase_event_process_noreport(ctx, atom);
160 if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
161 /* Don't report the event */
162 kbase_event_process_noreport(ctx, atom);
166 mutex_lock(&ctx->event_mutex);
167 atomic_inc(&ctx->event_count);
168 list_add_tail(&atom->dep_item[0], &ctx->event_list);
169 mutex_unlock(&ctx->event_mutex);
171 kbase_event_wakeup(ctx);
173 KBASE_EXPORT_TEST_API(kbase_event_post);
175 void kbase_event_close(struct kbase_context *kctx)
177 mutex_lock(&kctx->event_mutex);
178 atomic_set(&kctx->event_closed, true);
179 mutex_unlock(&kctx->event_mutex);
180 kbase_event_wakeup(kctx);
183 int kbase_event_init(struct kbase_context *kctx)
185 KBASE_DEBUG_ASSERT(kctx);
187 INIT_LIST_HEAD(&kctx->event_list);
188 mutex_init(&kctx->event_mutex);
189 atomic_set(&kctx->event_count, 0);
190 atomic_set(&kctx->event_closed, false);
191 kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
193 if (NULL == kctx->event_workq)
199 KBASE_EXPORT_TEST_API(kbase_event_init);
201 void kbase_event_cleanup(struct kbase_context *kctx)
203 KBASE_DEBUG_ASSERT(kctx);
204 KBASE_DEBUG_ASSERT(kctx->event_workq);
206 flush_workqueue(kctx->event_workq);
207 destroy_workqueue(kctx->event_workq);
209 /* We use kbase_event_dequeue to remove the remaining events as that
210 * deals with all the cleanup needed for the atoms.
212 * Note: use of kctx->event_list without a lock is safe because this must be the last
213 * thread using it (because we're about to terminate the lock)
215 while (!list_empty(&kctx->event_list)) {
216 struct base_jd_event_v2 event;
218 kbase_event_dequeue(kctx, &event);
222 KBASE_EXPORT_TEST_API(kbase_event_cleanup);