1 /**
2  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
3  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions, and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The names of the above-listed copyright holders may not be used
15  *    to endorse or promote products derived from this software without
16  *    specific prior written permission.
17  *
18  * ALTERNATIVELY, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2, as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
23  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
26  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
27  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
29  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
30  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35
36 #include "vchiq_core.h"
37 #include "vchiq_ioctl.h"
38 #include "vchiq_arm.h"
39
40 #define DEVICE_NAME "vchiq"
41
42 /* Override the default prefix, which would be vchiq_arm (from the filename) */
43 #undef MODULE_PARAM_PREFIX
44 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
45
46 #define VCHIQ_MINOR 0
47
48 /* Some per-instance constants */
49 #define MAX_COMPLETIONS 128
50 #define MAX_SERVICES 64
51 #define MAX_ELEMENTS 8
52 #define MSG_QUEUE_SIZE 128
53
54 #define KEEPALIVE_VER 1
55 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
56
57 /* Run time control of log level, based on KERN_XXX level. */
58 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
59 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
60
61 #define SUSPEND_TIMER_TIMEOUT_MS 100
62 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
63
64 #define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
65 static const char *const suspend_state_names[] = {
66         "VC_SUSPEND_FORCE_CANCELED",
67         "VC_SUSPEND_REJECTED",
68         "VC_SUSPEND_FAILED",
69         "VC_SUSPEND_IDLE",
70         "VC_SUSPEND_REQUESTED",
71         "VC_SUSPEND_IN_PROGRESS",
72         "VC_SUSPEND_SUSPENDED"
73 };
74 #define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
75 static const char *const resume_state_names[] = {
76         "VC_RESUME_FAILED",
77         "VC_RESUME_IDLE",
78         "VC_RESUME_REQUESTED",
79         "VC_RESUME_IN_PROGRESS",
80         "VC_RESUME_RESUMED"
81 };
82 /* The number of times we allow force suspend to timeout before actually
83 ** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
84 ** correctly - we don't want to prevent ARM suspend indefinitely in this case.
85 */
86 #define FORCE_SUSPEND_FAIL_MAX 8
87
88 /* The time in ms allowed for videocore to go idle when force suspend has been
89  * requested */
90 #define FORCE_SUSPEND_TIMEOUT_MS 200
91
92
93 static void suspend_timer_callback(unsigned long context);
94 #ifdef notyet
95 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
96 static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
97 #endif
98
99
100 typedef struct user_service_struct {
101         VCHIQ_SERVICE_T *service;
102         void *userdata;
103         VCHIQ_INSTANCE_T instance;
104         char is_vchi;
105         char dequeue_pending;
106         char close_pending;
107         int message_available_pos;
108         int msg_insert;
109         int msg_remove;
110         struct semaphore insert_event;
111         struct semaphore remove_event;
112         struct semaphore close_event;
113         VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
114 } USER_SERVICE_T;
115
116 struct bulk_waiter_node {
117         struct bulk_waiter bulk_waiter;
118         int pid;
119         struct list_head list;
120 };
121
122 struct vchiq_instance_struct {
123         VCHIQ_STATE_T *state;
124         VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
125         int completion_insert;
126         int completion_remove;
127         struct semaphore insert_event;
128         struct semaphore remove_event;
129         struct mutex completion_mutex;
130
131         int connected;
132         int closing;
133         int pid;
134         int mark;
135         int use_close_delivered;
136         int trace;
137
138         struct list_head bulk_waiter_list;
139         struct mutex bulk_waiter_list_mutex;
140
141 #ifdef notyet
142         VCHIQ_DEBUGFS_NODE_T proc_entry;
143 #endif
144 };
145
146 typedef struct dump_context_struct {
147         char __user *buf;
148         size_t actual;
149         size_t space;
150         loff_t offset;
151 } DUMP_CONTEXT_T;
152
153 static struct cdev *  vchiq_cdev;
154 VCHIQ_STATE_T g_state;
155 static DEFINE_SPINLOCK(msg_queue_spinlock);
156
157 static const char *const ioctl_names[] = {
158         "CONNECT",
159         "SHUTDOWN",
160         "CREATE_SERVICE",
161         "REMOVE_SERVICE",
162         "QUEUE_MESSAGE",
163         "QUEUE_BULK_TRANSMIT",
164         "QUEUE_BULK_RECEIVE",
165         "AWAIT_COMPLETION",
166         "DEQUEUE_MESSAGE",
167         "GET_CLIENT_ID",
168         "GET_CONFIG",
169         "CLOSE_SERVICE",
170         "USE_SERVICE",
171         "RELEASE_SERVICE",
172         "SET_SERVICE_OPTION",
173         "DUMP_PHYS_MEM",
174         "LIB_VERSION",
175         "CLOSE_DELIVERED"
176 };
177
178 vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
179         (VCHIQ_IOC_MAX + 1));
180
181 static eventhandler_tag vchiq_ehtag = NULL;
182 static d_open_t         vchiq_open;
183 static d_close_t        vchiq_close;
184 static d_ioctl_t        vchiq_ioctl;
185
186 static struct cdevsw vchiq_cdevsw = {
187         .d_version      = D_VERSION,
188         .d_ioctl        = vchiq_ioctl,
189         .d_open         = vchiq_open,
190         .d_close        = vchiq_close,
191         .d_name         = DEVICE_NAME,
192 };
193
194 #if 0
195 static void
196 dump_phys_mem(void *virt_addr, uint32_t num_bytes);
197 #endif
198
199 /****************************************************************************
200 *
201 *   add_completion
202 *
203 ***************************************************************************/
204
205 static VCHIQ_STATUS_T
206 add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
207         VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
208         void *bulk_userdata)
209 {
210         VCHIQ_COMPLETION_DATA_T *completion;
211         int insert;
212         DEBUG_INITIALISE(g_state.local)
213
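        /* completion_insert/completion_remove are free-running counters:
        ** their difference gives the ring occupancy and the slot index is
        ** obtained by masking with (MAX_COMPLETIONS - 1), which is why
        ** MAX_COMPLETIONS must be a power of two.
        */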
214         insert = instance->completion_insert;
215         while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
216                 /* Out of space - wait for the client */
217                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
218                 vchiq_log_trace(vchiq_arm_log_level,
219                         "add_completion - completion queue full");
220                 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
221
222                 if (down_interruptible(&instance->remove_event) != 0) {
223                         vchiq_log_info(vchiq_arm_log_level,
224                                 "service_callback interrupted");
225                         return VCHIQ_RETRY;
226                 }
227
228                 if (instance->closing) {
229                         vchiq_log_info(vchiq_arm_log_level,
230                                 "service_callback closing");
231                         return VCHIQ_SUCCESS;
232                 }
233                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
234         }
235
236         completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
237
238         completion->header = header;
239         completion->reason = reason;
240         /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
241         completion->service_userdata = user_service->service;
242         completion->bulk_userdata = bulk_userdata;
243
244         if (reason == VCHIQ_SERVICE_CLOSED) {
245                 /* Take an extra reference, to be held until
246                    this CLOSED notification is delivered. */
247                 lock_service(user_service->service);
248                 if (instance->use_close_delivered)
249                         user_service->close_pending = 1;
250         }
251
252         /* A write barrier is needed here to ensure that the entire completion
253                 record is written out before the insert point. */
254         wmb();
255
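        /* Record where in the completion ring this MESSAGE_AVAILABLE sits;
        ** service_callback compares this position with completion_remove to
        ** decide whether a MESSAGE_AVAILABLE notification is still pending.
        */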
256         if (reason == VCHIQ_MESSAGE_AVAILABLE)
257                 user_service->message_available_pos = insert;
258
259         instance->completion_insert = ++insert;
260
261         up(&instance->insert_event);
262
263         return VCHIQ_SUCCESS;
264 }
265
266 /****************************************************************************
267 *
268 *   service_callback
269 *
270 ***************************************************************************/
271
272 static VCHIQ_STATUS_T
273 service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
274         VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
275 {
276         /* How do we ensure the callback goes to the right client?
277         ** The service_user data points to a USER_SERVICE_T record containing
278         ** the original callback and the user state structure, which contains a
279         ** circular buffer for completion records.
280         */
281         USER_SERVICE_T *user_service;
282         VCHIQ_SERVICE_T *service;
283         VCHIQ_INSTANCE_T instance;
284         int skip_completion = 0;
285         DEBUG_INITIALISE(g_state.local)
286
287         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
288
289         service = handle_to_service(handle);
290         BUG_ON(!service);
291         user_service = (USER_SERVICE_T *)service->base.userdata;
292         instance = user_service->instance;
293
294         if (!instance || instance->closing)
295                 return VCHIQ_SUCCESS;
296
297         vchiq_log_trace(vchiq_arm_log_level,
298                 "service_callback - service %lx(%d,%p), reason %d, header %lx, "
299                 "instance %lx, bulk_userdata %lx",
300                 (unsigned long)user_service,
301                 service->localport, user_service->userdata,
302                 reason, (unsigned long)header,
303                 (unsigned long)instance, (unsigned long)bulk_userdata);
304
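        /* VCHI services park message headers in a per-service ring
        ** (msg_insert/msg_remove are free-running counters masked with
        ** MSG_QUEUE_SIZE - 1) and post a MESSAGE_AVAILABLE completion only
        ** when needed; other services deliver every header through the
        ** completion queue.
        */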
305         if (header && user_service->is_vchi) {
306                 spin_lock(&msg_queue_spinlock);
307                 while (user_service->msg_insert ==
308                         (user_service->msg_remove + MSG_QUEUE_SIZE)) {
309                         spin_unlock(&msg_queue_spinlock);
310                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
311                         DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
312                         vchiq_log_trace(vchiq_arm_log_level,
313                                 "service_callback - msg queue full");
314                         /* If there is no MESSAGE_AVAILABLE in the completion
315                         ** queue, add one
316                         */
317                         if ((user_service->message_available_pos -
318                                 instance->completion_remove) < 0) {
319                                 VCHIQ_STATUS_T status;
320                                 vchiq_log_info(vchiq_arm_log_level,
321                                         "Inserting extra MESSAGE_AVAILABLE");
322                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
323                                 status = add_completion(instance, reason,
324                                         NULL, user_service, bulk_userdata);
325                                 if (status != VCHIQ_SUCCESS) {
326                                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
327                                         return status;
328                                 }
329                         }
330
331                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
332                         if (down_interruptible(&user_service->remove_event)
333                                 != 0) {
334                                 vchiq_log_info(vchiq_arm_log_level,
335                                         "service_callback interrupted");
336                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
337                                 return VCHIQ_RETRY;
338                         } else if (instance->closing) {
339                                 vchiq_log_info(vchiq_arm_log_level,
340                                         "service_callback closing");
341                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
342                                 return VCHIQ_ERROR;
343                         }
344                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
345                         spin_lock(&msg_queue_spinlock);
346                 }
347
348                 user_service->msg_queue[user_service->msg_insert &
349                         (MSG_QUEUE_SIZE - 1)] = header;
350                 user_service->msg_insert++;
351
352                 /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
353                 ** there is a MESSAGE_AVAILABLE in the completion queue then
354                 ** bypass the completion queue.
355                 */
356                 if (((user_service->message_available_pos -
357                         instance->completion_remove) >= 0) ||
358                         user_service->dequeue_pending) {
359                         user_service->dequeue_pending = 0;
360                         skip_completion = 1;
361                 }
362
363                 spin_unlock(&msg_queue_spinlock);
364
365                 up(&user_service->insert_event);
366
367                 header = NULL;
368         }
369
370         if (skip_completion) {
371                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
372                 return VCHIQ_SUCCESS;
373         }
374
375         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
376
377         return add_completion(instance, reason, header, user_service,
378                 bulk_userdata);
379 }
380
381 /****************************************************************************
382 *
383 *   user_service_free
384 *
385 ***************************************************************************/
386 static void
387 user_service_free(void *userdata)
388 {
389         USER_SERVICE_T *user_service = userdata;
390         
391         _sema_destroy(&user_service->insert_event);
392         _sema_destroy(&user_service->remove_event);
393
394         kfree(user_service);
395 }
396
397 /****************************************************************************
398 *
399 *   close_delivered
400 *
401 ***************************************************************************/
402 static void close_delivered(USER_SERVICE_T *user_service)
403 {
404         vchiq_log_info(vchiq_arm_log_level,
405                 "close_delivered(handle=%x)",
406                 user_service->service->handle);
407
408         if (user_service->close_pending) {
409                 /* Allow the underlying service to be culled */
410                 unlock_service(user_service->service);
411
412                 /* Wake the user-thread blocked in close_ or remove_service */
413                 up(&user_service->close_event);
414  
415                 user_service->close_pending = 0;
416         }
417 }
418
419 /****************************************************************************
420 *
421 *   vchiq_ioctl
422 *
423 ***************************************************************************/
424
425 static int
426 vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
427    struct thread *td)
428 {
429         VCHIQ_INSTANCE_T instance;
430         VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
431         VCHIQ_SERVICE_T *service = NULL;
432         int ret = 0;
433         int i, rc;
434         DEBUG_INITIALISE(g_state.local)
435
436         if ((ret = devfs_get_cdevpriv((void**)&instance))) {
437                 printf("vchiq_ioctl: devfs_get_cdevpriv failed: error %d\n", ret);
438                 return (ret);
439         }
440
441 /* XXXBSD: HACK! */
442 #define _IOC_NR(x) ((x) & 0xff)
443 #define _IOC_TYPE(x)    IOCGROUP(x)
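/* These mirror the Linux ioctl helpers: _IOC_NR() extracts the command
** number from the low byte and _IOC_TYPE() the ioctl group (here
** VCHIQ_IOC_MAGIC), which is what IOCGROUP() yields on FreeBSD.
*/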
444
445         vchiq_log_trace(vchiq_arm_log_level,
446                  "vchiq_ioctl - instance %x, cmd %s, arg %p",
447                 (unsigned int)instance,
448                 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
449                 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
450                 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
451
452         switch (cmd) {
453         case VCHIQ_IOC_SHUTDOWN:
454                 if (!instance->connected)
455                         break;
456
457                 /* Remove all services */
458                 i = 0;
459                 while ((service = next_service_by_instance(instance->state,
460                         instance, &i)) != NULL) {
461                         status = vchiq_remove_service(service->handle);
462                         unlock_service(service);
463                         if (status != VCHIQ_SUCCESS)
464                                 break;
465                 }
466                 service = NULL;
467
468                 if (status == VCHIQ_SUCCESS) {
469                         /* Wake the completion thread and ask it to exit */
470                         instance->closing = 1;
471                         up(&instance->insert_event);
472                 }
473
474                 break;
475
476         case VCHIQ_IOC_CONNECT:
477                 if (instance->connected) {
478                         ret = -EINVAL;
479                         break;
480                 }
481                 rc = lmutex_lock_interruptible(&instance->state->mutex);
482                 if (rc != 0) {
483                         vchiq_log_error(vchiq_arm_log_level,
484                                 "vchiq: connect: could not lock mutex for "
485                                 "state %d: %d",
486                                 instance->state->id, rc);
487                         ret = -EINTR;
488                         break;
489                 }
490                 status = vchiq_connect_internal(instance->state, instance);
491                 lmutex_unlock(&instance->state->mutex);
492
493                 if (status == VCHIQ_SUCCESS)
494                         instance->connected = 1;
495                 else
496                         vchiq_log_error(vchiq_arm_log_level,
497                                 "vchiq: could not connect: %d", status);
498                 break;
499
500         case VCHIQ_IOC_CREATE_SERVICE: {
501                 VCHIQ_CREATE_SERVICE_T args;
502                 USER_SERVICE_T *user_service = NULL;
503                 void *userdata;
504                 int srvstate;
505
506                 memcpy(&args, (const void*)arg, sizeof(args));
507
508                 user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
509                 if (!user_service) {
510                         ret = -ENOMEM;
511                         break;
512                 }
513
514                 if (args.is_open) {
515                         if (!instance->connected) {
516                                 ret = -ENOTCONN;
517                                 kfree(user_service);
518                                 break;
519                         }
520                         srvstate = VCHIQ_SRVSTATE_OPENING;
521                 } else {
522                         srvstate =
523                                  instance->connected ?
524                                  VCHIQ_SRVSTATE_LISTENING :
525                                  VCHIQ_SRVSTATE_HIDDEN;
526                 }
527
528                 userdata = args.params.userdata;
529                 args.params.callback = service_callback;
530                 args.params.userdata = user_service;
531                 service = vchiq_add_service_internal(
532                                 instance->state,
533                                 &args.params, srvstate,
534                                 instance, user_service_free);
535
536                 if (service != NULL) {
537                         user_service->service = service;
538                         user_service->userdata = userdata;
539                         user_service->instance = instance;
540                         user_service->is_vchi = (args.is_vchi != 0);
541                         user_service->dequeue_pending = 0;
542                         user_service->close_pending = 0;
543                         user_service->message_available_pos =
544                                 instance->completion_remove - 1;
545                         user_service->msg_insert = 0;
546                         user_service->msg_remove = 0;
547                         _sema_init(&user_service->insert_event, 0);
548                         _sema_init(&user_service->remove_event, 0);
549                         _sema_init(&user_service->close_event, 0);
550
551                         if (args.is_open) {
552                                 status = vchiq_open_service_internal
553                                         (service, instance->pid);
554                                 if (status != VCHIQ_SUCCESS) {
555                                         vchiq_remove_service(service->handle);
556                                         service = NULL;
557                                         ret = (status == VCHIQ_RETRY) ?
558                                                 -EINTR : -EIO;
559                                         break;
560                                 }
561                         }
562
563 #ifdef VCHIQ_IOCTL_DEBUG
564                         printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle);
565 #endif
566                         memcpy((void *)
567                                 &(((VCHIQ_CREATE_SERVICE_T*)
568                                         arg)->handle),
569                                 (const void *)&service->handle,
570                                 sizeof(service->handle));
571
572                         service = NULL;
573                 } else {
574                         ret = -EEXIST;
575                         kfree(user_service);
576                 }
577         } break;
578
579         case VCHIQ_IOC_CLOSE_SERVICE: {
580                 VCHIQ_SERVICE_HANDLE_T handle;
581
582                 memcpy(&handle, (const void*)arg, sizeof(handle));
583
584 #ifdef VCHIQ_IOCTL_DEBUG
585                 printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle);
586 #endif
587
588                 service = find_service_for_instance(instance, handle);
589                 if (service != NULL) {
590                         USER_SERVICE_T *user_service =
591                                 (USER_SERVICE_T *)service->base.userdata;
592                         /* close_pending is false on first entry, and when the
593                            wait in vchiq_close_service has been interrupted. */
594                         if (!user_service->close_pending) {
595                                 status = vchiq_close_service(service->handle);
596                                 if (status != VCHIQ_SUCCESS)
597                                         break;
598                         }
599
600                         /* close_pending is true once the underlying service
601                            has been closed until the client library calls the
602                            CLOSE_DELIVERED ioctl, signalling close_event. */
603                         if (user_service->close_pending &&
604                                 down_interruptible(&user_service->close_event))
605                                 status = VCHIQ_RETRY;
606                 }
607                 else
608                         ret = -EINVAL;
609         } break;
610
611         case VCHIQ_IOC_REMOVE_SERVICE: {
612                 VCHIQ_SERVICE_HANDLE_T handle;
613
614                 memcpy(&handle, (const void*)arg, sizeof(handle));
615
616 #ifdef VCHIQ_IOCTL_DEBUG
617                 printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle);
618 #endif
619
620                 service = find_service_for_instance(instance, handle);
621                 if (service != NULL) {
622                         USER_SERVICE_T *user_service =
623                                 (USER_SERVICE_T *)service->base.userdata;
624                         /* close_pending is false on first entry, and when the
625                            wait in vchiq_close_service has been interrupted. */
626                         if (!user_service->close_pending) {
627                                 status = vchiq_remove_service(service->handle);
628                                 if (status != VCHIQ_SUCCESS)
629                                         break;
630                         }
631
632                         /* close_pending is true once the underlying service
633                            has been closed until the client library calls the
634                            CLOSE_DELIVERED ioctl, signalling close_event. */
635                         if (user_service->close_pending &&
636                                 down_interruptible(&user_service->close_event))
637                                 status = VCHIQ_RETRY;
638                 }
639                 else
640                         ret = -EINVAL;
641         } break;
642
643         case VCHIQ_IOC_USE_SERVICE:
644         case VCHIQ_IOC_RELEASE_SERVICE: {
645                 VCHIQ_SERVICE_HANDLE_T handle;
646
647                 memcpy(&handle, (const void*)arg, sizeof(handle));
648
649 #ifdef VCHIQ_IOCTL_DEBUG
650                 printf("%s: [%s SERVICE] handle = %08x\n", __func__,
651                     cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle);
652 #endif
653
654                 service = find_service_for_instance(instance, handle);
655                 if (service != NULL) {
656                         status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
657                                 vchiq_use_service_internal(service) :
658                                 vchiq_release_service_internal(service);
659                         if (status != VCHIQ_SUCCESS) {
660                                 vchiq_log_error(vchiq_susp_log_level,
661                                         "%s: cmd %s returned error %d for "
662                                         "service %c%c%c%c:%8x",
663                                         __func__,
664                                         (cmd == VCHIQ_IOC_USE_SERVICE) ?
665                                                 "VCHIQ_IOC_USE_SERVICE" :
666                                                 "VCHIQ_IOC_RELEASE_SERVICE",
667                                         status,
668                                         VCHIQ_FOURCC_AS_4CHARS(
669                                                 service->base.fourcc),
670                                         service->client_id);
671                                 ret = -EINVAL;
672                         }
673                 } else
674                         ret = -EINVAL;
675         } break;
676
677         case VCHIQ_IOC_QUEUE_MESSAGE: {
678                 VCHIQ_QUEUE_MESSAGE_T args;
679                 memcpy(&args, (const void*)arg, sizeof(args));
680
681 #ifdef VCHIQ_IOCTL_DEBUG
682                 printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, args.handle);
683 #endif
684
685                 service = find_service_for_instance(instance, args.handle);
686
687                 if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
688                         /* Copy elements into kernel space */
689                         VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
690                         if (copy_from_user(elements, args.elements,
691                                 args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
692                                 status = vchiq_queue_message
693                                         (args.handle,
694                                         elements, args.count);
695                         else
696                                 ret = -EFAULT;
697                 } else {
698                         ret = -EINVAL;
699                 }
700         } break;
701
702         case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
703         case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
704                 VCHIQ_QUEUE_BULK_TRANSFER_T args;
705                 struct bulk_waiter_node *waiter = NULL;
706                 VCHIQ_BULK_DIR_T dir =
707                         (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
708                         VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
709
710                 memcpy(&args, (const void*)arg, sizeof(args));
711
712                 service = find_service_for_instance(instance, args.handle);
713                 if (!service) {
714                         ret = -EINVAL;
715                         break;
716                 }
717
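                /* BLOCKING allocates a fresh waiter for this call to sleep
                ** on.  WAITING re-claims a waiter that an earlier,
                ** interrupted BLOCKING transfer parked on bulk_waiter_list
                ** (matched by pid) after rewriting the caller's mode to
                ** WAITING below.
                */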
718                 if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
719                         waiter = kzalloc(sizeof(struct bulk_waiter_node),
720                                 GFP_KERNEL);
721                         if (!waiter) {
722                                 ret = -ENOMEM;
723                                 break;
724                         }
725                         args.userdata = &waiter->bulk_waiter;
726                 } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
727                         struct list_head *pos;
728                         lmutex_lock(&instance->bulk_waiter_list_mutex);
729                         list_for_each(pos, &instance->bulk_waiter_list) {
730                                 if (list_entry(pos, struct bulk_waiter_node,
731                                         list)->pid == current->p_pid) {
732                                         waiter = list_entry(pos,
733                                                 struct bulk_waiter_node,
734                                                 list);
735                                         list_del(pos);
736                                         break;
737                                 }
738
739                         }
740                         lmutex_unlock(&instance->bulk_waiter_list_mutex);
741                         if (!waiter) {
742                                 vchiq_log_error(vchiq_arm_log_level,
743                                         "no bulk_waiter found for pid %d",
744                                         current->p_pid);
745                                 ret = -ESRCH;
746                                 break;
747                         }
748                         vchiq_log_info(vchiq_arm_log_level,
749                                 "found bulk_waiter %x for pid %d",
750                                 (unsigned int)waiter, current->p_pid);
751                         args.userdata = &waiter->bulk_waiter;
752                 }
753                 status = vchiq_bulk_transfer
754                         (args.handle,
755                          VCHI_MEM_HANDLE_INVALID,
756                          args.data, args.size,
757                          args.userdata, args.mode,
758                          dir);
759                 if (!waiter)
760                         break;
761                 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
762                         !waiter->bulk_waiter.bulk) {
763                         if (waiter->bulk_waiter.bulk) {
764                                 /* Cancel the signal when the transfer
765                                 ** completes. */
766                                 spin_lock(&bulk_waiter_spinlock);
767                                 waiter->bulk_waiter.bulk->userdata = NULL;
768                                 spin_unlock(&bulk_waiter_spinlock);
769                         }
770                         _sema_destroy(&waiter->bulk_waiter.event);
771                         kfree(waiter);
772                 } else {
773                         const VCHIQ_BULK_MODE_T mode_waiting =
774                                 VCHIQ_BULK_MODE_WAITING;
775                         waiter->pid = current->p_pid;
776                         lmutex_lock(&instance->bulk_waiter_list_mutex);
777                         list_add(&waiter->list, &instance->bulk_waiter_list);
778                         lmutex_unlock(&instance->bulk_waiter_list_mutex);
779                         vchiq_log_info(vchiq_arm_log_level,
780                                 "saved bulk_waiter %x for pid %d",
781                                 (unsigned int)waiter, current->p_pid);
782
783                         memcpy((void *)
784                                 &(((VCHIQ_QUEUE_BULK_TRANSFER_T *)
785                                         arg)->mode),
786                                 (const void *)&mode_waiting,
787                                 sizeof(mode_waiting));
788                 }
789         } break;
790
791         case VCHIQ_IOC_AWAIT_COMPLETION: {
792                 VCHIQ_AWAIT_COMPLETION_T args;
793                 int count = 0;
794
795                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
796                 if (!instance->connected) {
797                         ret = -ENOTCONN;
798                         break;
799                 }
800
801                 memcpy(&args, (const void*)arg, sizeof(args));
802
803                 lmutex_lock(&instance->completion_mutex);
804
805                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
806                 while ((instance->completion_remove ==
807                         instance->completion_insert)
808                         && !instance->closing) {
809
810                         DEBUG_TRACE(AWAIT_COMPLETION_LINE);
811                         lmutex_unlock(&instance->completion_mutex);
812                         rc = down_interruptible(&instance->insert_event);
813                         lmutex_lock(&instance->completion_mutex);
814                         if (rc != 0) {
815                                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
816                                 vchiq_log_info(vchiq_arm_log_level,
817                                         "AWAIT_COMPLETION interrupted");
818                                 ret = -EINTR;
819                                 break;
820                         }
821                 }
822                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
823
824                 if (ret == 0) {
825                         int msgbufcount = args.msgbufcount;
826                         int remove;
827
828                         remove = instance->completion_remove;
829
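                        /* Copy up to args.count completions out to args.buf.
                        ** Message payloads are copied into user buffers taken
                        ** from the tail of args.msgbufs, and the completion's
                        ** header pointer is rewritten to refer to the
                        ** user-space copy.
                        */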
830                         for (count = 0; count < args.count; count++) {
831                                 VCHIQ_COMPLETION_DATA_T *completion;
832                                 VCHIQ_SERVICE_T *service1;
833                                 USER_SERVICE_T *user_service;
834                                 VCHIQ_HEADER_T *header;
835
836                                 if (remove == instance->completion_insert)
837                                         break;
838
839                                 completion = &instance->completions[
840                                         remove & (MAX_COMPLETIONS - 1)];
841
842
843                                 /* A read memory barrier is needed to prevent
844                                 ** the prefetch of a stale completion record
845                                 */
846                                 rmb();
847
848                                 service1 = completion->service_userdata;
849                                 user_service = service1->base.userdata;
850                                 completion->service_userdata =
851                                         user_service->userdata;
852
853                                 header = completion->header;
854                                 if (header) {
855                                         void __user *msgbuf;
856                                         int msglen;
857
858                                         msglen = header->size +
859                                                 sizeof(VCHIQ_HEADER_T);
860                                         /* This must be a VCHIQ-style service */
861                                         if (args.msgbufsize < msglen) {
862                                                 vchiq_log_error(
863                                                         vchiq_arm_log_level,
864                                                         "header %x: msgbufsize"
865                                                         " %x < msglen %x",
866                                                         (unsigned int)header,
867                                                         args.msgbufsize,
868                                                         msglen);
869                                                 WARN(1, "invalid message "
870                                                         "size\n");
871                                                 if (count == 0)
872                                                         ret = -EMSGSIZE;
873                                                 break;
874                                         }
875                                         if (msgbufcount <= 0)
876                                                 /* Stall here for lack of a
877                                                 ** buffer for the message. */
878                                                 break;
879                                         /* Get the pointer from user space */
880                                         msgbufcount--;
881                                         if (copy_from_user(&msgbuf,
882                                                 (const void __user *)
883                                                 &args.msgbufs[msgbufcount],
884                                                 sizeof(msgbuf)) != 0) {
885                                                 if (count == 0)
886                                                         ret = -EFAULT;
887                                                 break;
888                                         }
889
890                                         /* Copy the message to user space */
891                                         if (copy_to_user(msgbuf, header,
892                                                 msglen) != 0) {
893                                                 if (count == 0)
894                                                         ret = -EFAULT;
895                                                 break;
896                                         }
897
898                                         /* Now it has been copied, the message
899                                         ** can be released. */
900                                         vchiq_release_message(service1->handle,
901                                                 header);
902
903                                         /* The completion must point to the
904                                         ** msgbuf. */
905                                         completion->header = msgbuf;
906                                 }
907
908                                 if ((completion->reason ==
909                                         VCHIQ_SERVICE_CLOSED) &&
910                                         !instance->use_close_delivered)
911                                         unlock_service(service1);
912
913                                 if (copy_to_user((void __user *)(
914                                         (size_t)args.buf +
915                                         count * sizeof(VCHIQ_COMPLETION_DATA_T)),
916                                         completion,
917                                         sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
918                                                 if (ret == 0)
919                                                         ret = -EFAULT;
920                                         break;
921                                 }
922
923                                 /* Ensure that the above copy has completed
924                                 ** before advancing the remove pointer. */
925                                 mb();
926
927                                 instance->completion_remove = ++remove;
928                         }
929
930                         if (msgbufcount != args.msgbufcount) {
931                                 memcpy((void __user *)
932                                         &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
933                                                 msgbufcount,
934                                         &msgbufcount,
935                                         sizeof(msgbufcount));
936                         }
937
938                          if (count != args.count)
939                          {
940                                 memcpy((void __user *)
941                                         &((VCHIQ_AWAIT_COMPLETION_T *)arg)->count,
942                                         &count, sizeof(count));
943                         }
944                 }
945
946                 if (count != 0)
947                         up(&instance->remove_event);
948
949                 if ((ret == 0) && instance->closing)
950                         ret = -ENOTCONN;
951                 /* 
952                  * XXXBSD: ioctl return codes are not negative as in linux, so
953                  * we can not indicate success with positive number of passed 
954                  * messages
955                  */
956                 if (ret > 0)
957                         ret = 0;
958
959                 lmutex_unlock(&instance->completion_mutex);
960                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
961         } break;
962
963         case VCHIQ_IOC_DEQUEUE_MESSAGE: {
964                 VCHIQ_DEQUEUE_MESSAGE_T args;
965                 USER_SERVICE_T *user_service;
966                 VCHIQ_HEADER_T *header;
967
968                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
969                 memcpy(&args, (const void*)arg, sizeof(args));
970                 service = find_service_for_instance(instance, args.handle);
971                 if (!service) {
972                         ret = -EINVAL;
973                         break;
974                 }
975                 user_service = (USER_SERVICE_T *)service->base.userdata;
976                 if (user_service->is_vchi == 0) {
977                         ret = -EINVAL;
978                         break;
979                 }
980
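                /* If the queue is empty and the caller allows blocking, set
                ** dequeue_pending so that service_callback skips posting a
                ** MESSAGE_AVAILABLE completion for the message that wakes
                ** this thread through insert_event.
                */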
981                 spin_lock(&msg_queue_spinlock);
982                 if (user_service->msg_remove == user_service->msg_insert) {
983                         if (!args.blocking) {
984                                 spin_unlock(&msg_queue_spinlock);
985                                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
986                                 ret = -EWOULDBLOCK;
987                                 break;
988                         }
989                         user_service->dequeue_pending = 1;
990                         do {
991                                 spin_unlock(&msg_queue_spinlock);
992                                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
993                                 if (down_interruptible(
994                                         &user_service->insert_event) != 0) {
995                                         vchiq_log_info(vchiq_arm_log_level,
996                                                 "DEQUEUE_MESSAGE interrupted");
997                                         ret = -EINTR;
998                                         break;
999                                 }
1000                                 spin_lock(&msg_queue_spinlock);
1001                         } while (user_service->msg_remove ==
1002                                 user_service->msg_insert);
1003
1004                         if (ret)
1005                                 break;
1006                 }
1007
1008                 BUG_ON((int)(user_service->msg_insert -
1009                         user_service->msg_remove) < 0);
1010
1011                 header = user_service->msg_queue[user_service->msg_remove &
1012                         (MSG_QUEUE_SIZE - 1)];
1013                 user_service->msg_remove++;
1014                 spin_unlock(&msg_queue_spinlock);
1015
1016                 up(&user_service->remove_event);
1017                 if (header == NULL)
1018                         ret = -ENOTCONN;
1019                 else if (header->size <= args.bufsize) {
1020                         /* Copy to user space if msgbuf is not NULL */
1021                         if ((args.buf == NULL) ||
1022                                 (copy_to_user((void __user *)args.buf,
1023                                 header->data,
1024                                 header->size) == 0)) {
1025                                 args.bufsize = header->size;
1026                                 memcpy((void *)arg, &args,
1027                                     sizeof(args));
1028                                 vchiq_release_message(
1029                                         service->handle,
1030                                         header);
1031                         } else
1032                                 ret = -EFAULT;
1033                 } else {
1034                         vchiq_log_error(vchiq_arm_log_level,
1035                                 "header %x: bufsize %x < size %x",
1036                                 (unsigned int)header, args.bufsize,
1037                                 header->size);
1038                         WARN(1, "invalid size\n");
1039                         ret = -EMSGSIZE;
1040                 }
1041                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
1042         } break;
1043
1044         case VCHIQ_IOC_GET_CLIENT_ID: {
1045                 VCHIQ_SERVICE_HANDLE_T handle;
1046
1047                 memcpy(&handle, (const void*)arg, sizeof(handle));
1048
1049                 ret = vchiq_get_client_id(handle);
1050         } break;
1051
1052         case VCHIQ_IOC_GET_CONFIG: {
1053                 VCHIQ_GET_CONFIG_T args;
1054                 VCHIQ_CONFIG_T config;
1055
1056                 memcpy(&args, (const void*)arg, sizeof(args));
1057                 if (args.config_size > sizeof(config)) {
1058                         ret = -EINVAL;
1059                         break;
1060                 }
1061                 status = vchiq_get_config(instance, args.config_size, &config);
1062                 if (status == VCHIQ_SUCCESS) {
1063                         if (copy_to_user((void __user *)args.pconfig,
1064                                     &config, args.config_size) != 0) {
1065                                 ret = -EFAULT;
1066                                 break;
1067                         }
1068                 }
1069         } break;
1070
1071         case VCHIQ_IOC_SET_SERVICE_OPTION: {
1072                 VCHIQ_SET_SERVICE_OPTION_T args;
1073
1074                 memcpy(&args, (const void*)arg, sizeof(args));
1075
1076                 service = find_service_for_instance(instance, args.handle);
1077                 if (!service) {
1078                         ret = -EINVAL;
1079                         break;
1080                 }
1081
1082                 status = vchiq_set_service_option(
1083                                 args.handle, args.option, args.value);
1084         } break;
1085
1086         case VCHIQ_IOC_DUMP_PHYS_MEM: {
1087                 VCHIQ_DUMP_MEM_T  args;
1088
1089                 memcpy(&args, (const void*)arg, sizeof(args));
1090                 printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__);
1091 #if 0
1092                 dump_phys_mem(args.virt_addr, args.num_bytes);
1093 #endif
1094         } break;
1095
1096         case VCHIQ_IOC_LIB_VERSION: {
1097                 unsigned int lib_version = (unsigned int)arg;
1098
1099                 if (lib_version < VCHIQ_VERSION_MIN)
1100                         ret = -EINVAL;
1101                 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
1102                         instance->use_close_delivered = 1;
1103         } break;
1104
1105         case VCHIQ_IOC_CLOSE_DELIVERED: {
1106                 VCHIQ_SERVICE_HANDLE_T handle;
1107                 memcpy(&handle, (const void*)arg, sizeof(handle));
1108
1109                 service = find_closed_service_for_instance(instance, handle);
1110                 if (service != NULL) {
1111                         USER_SERVICE_T *user_service =
1112                                 (USER_SERVICE_T *)service->base.userdata;
1113                         close_delivered(user_service);
1114                 }
1115                 else
1116                         ret = -EINVAL;
1117         } break;
1118
1119         default:
1120                 ret = -ENOTTY;
1121                 break;
1122         }
1123
1124         if (service)
1125                 unlock_service(service);
1126
1127         if (ret == 0) {
1128                 if (status == VCHIQ_ERROR)
1129                         ret = -EIO;
1130                 else if (status == VCHIQ_RETRY)
1131                         ret = -EINTR;
1132         }
1133
1134         if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
1135                 (ret != -EWOULDBLOCK))
1136                 vchiq_log_info(vchiq_arm_log_level,
1137                         "  ioctl instance %lx, cmd %s -> status %d, %d",
1138                         (unsigned long)instance,
1139                         (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1140                                 ioctl_names[_IOC_NR(cmd)] :
1141                                 "<invalid>",
1142                         status, ret);
1143         else
1144                 vchiq_log_trace(vchiq_arm_log_level,
1145                         "  ioctl instance %lx, cmd %s -> status %d, %d",
1146                         (unsigned long)instance,
1147                         (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
1148                                 ioctl_names[_IOC_NR(cmd)] :
1149                                 "<invalid>",
1150                         status, ret);
1151
1152         /* XXXBSD: report BSD-style error to userland */
1153         if (ret < 0)
1154                 ret = -ret;
1155
1156         return ret;
1157 }
1158
1159 static void
1160 instance_dtr(void *data)
1161 {
1162
1163         kfree(data);
1164 }
1165
1166 /****************************************************************************
1167 *
1168 *   vchiq_open
1169 *
1170 ***************************************************************************/
1171
1172 static int
1173 vchiq_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
1174 {
1175         vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1176         /* XXXBSD: do we really need this check? */
1177         if (1) {
1178                 VCHIQ_STATE_T *state = vchiq_get_state();
1179                 VCHIQ_INSTANCE_T instance;
1180
1181                 if (!state) {
1182                         vchiq_log_error(vchiq_arm_log_level,
1183                                 "vchiq has no connection to VideoCore");
1184                         return -ENOTCONN;
1185                 }
1186
1187                 instance = kmalloc(sizeof(*instance), GFP_KERNEL);
1188                 if (!instance)
1189                         return -ENOMEM;
1190
1191                 instance->state = state;
1192                 /* XXXBSD: PID or thread ID? */
1193                 instance->pid = td->td_proc->p_pid;
1194
1195 #ifdef notyet
1196                 ret = vchiq_proc_add_instance(instance);
1197                 if (ret != 0) {
1198                         kfree(instance);
1199                         return ret;
1200                 }
1201 #endif
1202
1203                 _sema_init(&instance->insert_event, 0);
1204                 _sema_init(&instance->remove_event, 0);
1205                 lmutex_init(&instance->completion_mutex);
1206                 lmutex_init(&instance->bulk_waiter_list_mutex);
1207                 INIT_LIST_HEAD(&instance->bulk_waiter_list);
1208
1209                 devfs_set_cdevpriv(instance, instance_dtr);
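                /* Attach the instance to this file descriptor; vchiq_ioctl
                ** and vchiq_close recover it via devfs_get_cdevpriv() and
                ** instance_dtr() frees it when the descriptor is destroyed.
                */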
1210         } 
1211         else {
1212                 vchiq_log_error(vchiq_arm_log_level,
1213                         "Unknown minor device");
1214                 return -ENXIO;
1215         }
1216
1217         return 0;
1218 }
1219
1220 /****************************************************************************
1221 *
1222 *   vchiq_close (vchiq_release in the Linux driver)
1223 *
1224 ***************************************************************************/
1225
1226 static int
1227 vchiq_close(struct cdev *dev, int flags __unused, int fmt __unused,
1228                 struct thread *td)
1229 {
1230         int ret = 0;
1231         if (1) {
1232                 VCHIQ_INSTANCE_T instance;
1233                 VCHIQ_STATE_T *state = vchiq_get_state();
1234                 VCHIQ_SERVICE_T *service;
1235                 int i;
1236
1237                 if ((ret = devfs_get_cdevpriv((void**)&instance))) {
1238                         printf("devfs_get_cdevpriv failed: error %d\n", ret);
1239                         return (ret);
1240                 }
1241
1242                 vchiq_log_info(vchiq_arm_log_level,
1243                         "vchiq_release: instance=%lx",
1244                         (unsigned long)instance);
1245
1246                 if (!state) {
1247                         ret = -EPERM;
1248                         goto out;
1249                 }
1250
1251                 /* Ensure videocore is awake to allow termination. */
1252                 vchiq_use_internal(instance->state, NULL,
1253                                 USE_TYPE_VCHIQ);
1254
1255                 lmutex_lock(&instance->completion_mutex);
1256
1257                 /* Wake the completion thread and ask it to exit */
1258                 instance->closing = 1;
1259                 up(&instance->insert_event);
1260
1261                 lmutex_unlock(&instance->completion_mutex);
1262
1263                 /* Wake the slot handler if the completion queue is full. */
1264                 up(&instance->remove_event);
1265
1266                 /* Mark all services for termination... */
1267                 i = 0;
1268                 while ((service = next_service_by_instance(state, instance,
1269                         &i)) != NULL) {
1270                         USER_SERVICE_T *user_service = service->base.userdata;
1271
1272                         /* Wake the slot handler if the msg queue is full. */
1273                         up(&user_service->remove_event);
1274
1275                         vchiq_terminate_service_internal(service);
1276                         unlock_service(service);
1277                 }
1278
1279                 /* ...and wait for them to die */
1280                 i = 0;
1281                 while ((service = next_service_by_instance(state, instance, &i))
1282                         != NULL) {
1283                         USER_SERVICE_T *user_service = service->base.userdata;
1284
1285                         down(&service->remove_event);
1286
1287                         BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
1288
1289                         spin_lock(&msg_queue_spinlock);
1290
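                        /*
                         * msg_insert and msg_remove are free-running
                         * indices; the message queue is a power-of-two
                         * ring, so masking with (MSG_QUEUE_SIZE - 1)
                         * selects the slot to drain.
                         */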
1291                         while (user_service->msg_remove !=
1292                                 user_service->msg_insert) {
1293                                 VCHIQ_HEADER_T *header = user_service->
1294                                         msg_queue[user_service->msg_remove &
1295                                                 (MSG_QUEUE_SIZE - 1)];
1296                                 user_service->msg_remove++;
1297                                 spin_unlock(&msg_queue_spinlock);
1298
1299                                 if (header)
1300                                         vchiq_release_message(
1301                                                 service->handle,
1302                                                 header);
1303                                 spin_lock(&msg_queue_spinlock);
1304                         }
1305
1306                         spin_unlock(&msg_queue_spinlock);
1307
1308                         unlock_service(service);
1309                 }
1310
1311                 /* Release any closed services */
1312                 while (instance->completion_remove !=
1313                         instance->completion_insert) {
1314                         VCHIQ_COMPLETION_DATA_T *completion;
1315                         VCHIQ_SERVICE_T *service1;
1316                         completion = &instance->completions[
1317                                 instance->completion_remove &
1318                                 (MAX_COMPLETIONS - 1)];
1319                         service1 = completion->service_userdata;
1320                         if (completion->reason == VCHIQ_SERVICE_CLOSED)
1321                         {
1322                                 USER_SERVICE_T *user_service =
1323                                         service1->base.userdata;
1324
1325                                 /* Wake any blocked user-thread */
1326                                 if (instance->use_close_delivered)
1327                                         up(&user_service->close_event);
1328                                 unlock_service(service1);
1329                         }
1330                         instance->completion_remove++;
1331                 }
1332
1333                 /* Release the PEER service count. */
1334                 vchiq_release_internal(instance->state, NULL);
1335
1336                 {
1337                         struct list_head *pos, *next;
1338                         list_for_each_safe(pos, next,
1339                                 &instance->bulk_waiter_list) {
1340                                 struct bulk_waiter_node *waiter;
1341                                 waiter = list_entry(pos,
1342                                         struct bulk_waiter_node,
1343                                         list);
1344                                 list_del(pos);
1345                                 vchiq_log_info(vchiq_arm_log_level,
1346                                         "bulk_waiter - cleaned up %x "
1347                                         "for pid %d",
1348                                         (unsigned int)waiter, waiter->pid);
1349                                 _sema_destroy(&waiter->bulk_waiter.event);
1350                                 kfree(waiter);
1351                         }
1352                 }
1353
1354         }
1355         else {
1356                 vchiq_log_error(vchiq_arm_log_level,
1357                         "Unknown minor device");
1358                 ret = -ENXIO;
1359         }
1360
1361 out:
1362         return ret;
1363 }
1364
1365 /****************************************************************************
1366 *
1367 *   vchiq_dump
1368 *
1369 ***************************************************************************/
1370
1371 void
1372 vchiq_dump(void *dump_context, const char *str, int len)
1373 {
1374         DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
1375
1376         if (context->actual < context->space) {
1377                 int copy_bytes;
1378                 if (context->offset > 0) {
1379                         int skip_bytes = min(len, (int)context->offset);
1380                         str += skip_bytes;
1381                         len -= skip_bytes;
1382                         context->offset -= skip_bytes;
1383                         if (context->offset > 0)
1384                                 return;
1385                 }
1386                 copy_bytes = min(len, (int)(context->space - context->actual));
1387                 if (copy_bytes == 0)
1388                         return;
1389                 memcpy(context->buf + context->actual, str, copy_bytes);
1390                 context->actual += copy_bytes;
1391                 len -= copy_bytes;
1392
1393                 /* If the terminating NUL is included in the length, then it
1394                 ** marks the end of a line and should be replaced with a
1395                 ** newline. */
1396                 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1397                         char cr = '\n';
1398                         memcpy(context->buf + context->actual - 1, &cr, 1);
1399                 }
1400         }
1401 }
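
/*
 * Illustrative usage sketch (editorial addition, not part of the driver):
 * a caller primes the DUMP_CONTEXT_T - destination buffer, bytes written so
 * far, buffer capacity and leading bytes to skip - before invoking the dump
 * routines.  The buf/count/offset names below are hypothetical caller
 * variables; vchiq_read() (under "#ifdef notyet" later in this file) uses
 * the same pattern:
 *
 *      DUMP_CONTEXT_T context;
 *      context.buf    = buf;
 *      context.actual = 0;
 *      context.space  = count;
 *      context.offset = offset;
 *      vchiq_dump_state(&context, &g_state);
 */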
1402
1403 /****************************************************************************
1404 *
1405 *   vchiq_dump_platform_instances
1406 *
1407 ***************************************************************************/
1408
1409 void
1410 vchiq_dump_platform_instances(void *dump_context)
1411 {
1412         VCHIQ_STATE_T *state = vchiq_get_state();
1413         char buf[80];
1414         int len;
1415         int i;
1416
1417         /* There is no list of instances, so instead scan all services,
1418                 marking those that have been dumped. */
1419
1420         for (i = 0; i < state->unused_service; i++) {
1421                 VCHIQ_SERVICE_T *service = state->services[i];
1422                 VCHIQ_INSTANCE_T instance;
1423
1424                 if (service && (service->base.callback == service_callback)) {
1425                         instance = service->instance;
1426                         if (instance)
1427                                 instance->mark = 0;
1428                 }
1429         }
1430
1431         for (i = 0; i < state->unused_service; i++) {
1432                 VCHIQ_SERVICE_T *service = state->services[i];
1433                 VCHIQ_INSTANCE_T instance;
1434
1435                 if (service && (service->base.callback == service_callback)) {
1436                         instance = service->instance;
1437                         if (instance && !instance->mark) {
1438                                 len = snprintf(buf, sizeof(buf),
1439                                         "Instance %x: pid %d,%s completions "
1440                                                 "%d/%d",
1441                                         (unsigned int)instance, instance->pid,
1442                                         instance->connected ? " connected, " :
1443                                                 "",
1444                                         instance->completion_insert -
1445                                                 instance->completion_remove,
1446                                         MAX_COMPLETIONS);
1447
1448                                 vchiq_dump(dump_context, buf, len + 1);
1449
1450                                 instance->mark = 1;
1451                         }
1452                 }
1453         }
1454 }
1455
1456 /****************************************************************************
1457 *
1458 *   vchiq_dump_platform_service_state
1459 *
1460 ***************************************************************************/
1461
1462 void
1463 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
1464 {
1465         USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
1466         char buf[80];
1467         int len;
1468
1469         len = snprintf(buf, sizeof(buf), "  instance %x",
1470                 (unsigned int)service->instance);
1471
1472         if ((service->base.callback == service_callback) &&
1473                 user_service->is_vchi) {
1474                 len += snprintf(buf + len, sizeof(buf) - len,
1475                         ", %d/%d messages",
1476                         user_service->msg_insert - user_service->msg_remove,
1477                         MSG_QUEUE_SIZE);
1478
1479                 if (user_service->dequeue_pending)
1480                         len += snprintf(buf + len, sizeof(buf) - len,
1481                                 " (dequeue pending)");
1482         }
1483
1484         vchiq_dump(dump_context, buf, len + 1);
1485 }
1486
1487 #ifdef notyet
1488 /****************************************************************************
1489 *
1490 *   dump_phys_mem
1491 *
1492 ***************************************************************************/
1493
1494 static void
1495 dump_phys_mem(void *virt_addr, uint32_t num_bytes)
1496 {
1497         int            rc;
1498         uint8_t       *end_virt_addr = virt_addr + num_bytes;
1499         int            num_pages;
1500         int            offset;
1501         int            end_offset;
1502         int            page_idx;
1503         int            prev_idx;
1504         struct page   *page;
1505         struct page  **pages;
1506         uint8_t       *kmapped_virt_ptr;
1507
1508         /* Align virt_addr and end_virt_addr to 16-byte boundaries. */
1509
1510         virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
1511         end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
1512                 ~0x0fuL);
1513
1514         offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
1515         end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
1516
1517         num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
1518
1519         pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
1520         if (pages == NULL) {
1521                 vchiq_log_error(vchiq_arm_log_level,
1522                         "Unable to allocate memory for %d pages\n",
1523                         num_pages);
1524                 return;
1525         }
1526
1527         down_read(&current->mm->mmap_sem);
1528         rc = get_user_pages(current,      /* task */
1529                 current->mm,              /* mm */
1530                 (unsigned long)virt_addr, /* start */
1531                 num_pages,                /* len */
1532                 0,                        /* write */
1533                 0,                        /* force */
1534                 pages,                    /* pages (array of page pointers) */
1535                 NULL);                    /* vmas */
1536         up_read(&current->mm->mmap_sem);
1537
1538         prev_idx = -1;
1539         page = NULL;
1540
1541         while (offset < end_offset) {
1542
1543                 int page_offset = offset % PAGE_SIZE;
1544                 page_idx = offset / PAGE_SIZE;
1545
1546                 if (page_idx != prev_idx) {
1547
1548                         if (page != NULL)
1549                                 kunmap(page);
1550                         page = pages[page_idx];
1551                         kmapped_virt_ptr = kmap(page);
1552
1553                         prev_idx = page_idx;
1554                 }
1555
1556                 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
1557                         vchiq_log_dump_mem("ph",
1558                                 (uint32_t)(unsigned long)&kmapped_virt_ptr[
1559                                         page_offset],
1560                                 &kmapped_virt_ptr[page_offset], 16);
1561
1562                 offset += 16;
1563         }
1564         if (page != NULL)
1565                 kunmap(page);
1566
1567         for (page_idx = 0; page_idx < num_pages; page_idx++)
1568                 page_cache_release(pages[page_idx]);
1569
1570         kfree(pages);
1571 }
1572
1573 /****************************************************************************
1574 *
1575 *   vchiq_read
1576 *
1577 ***************************************************************************/
1578
1579 static ssize_t
1580 vchiq_read(struct file *file, char __user *buf,
1581         size_t count, loff_t *ppos)
1582 {
1583         DUMP_CONTEXT_T context;
1584         context.buf = buf;
1585         context.actual = 0;
1586         context.space = count;
1587         context.offset = *ppos;
1588
1589         vchiq_dump_state(&context, &g_state);
1590
1591         *ppos += context.actual;
1592
1593         return context.actual;
1594 }
1595 #endif
1596
1597 VCHIQ_STATE_T *
1598 vchiq_get_state(void)
1599 {
1600
1601         if (g_state.remote == NULL)
1602                 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
1603         else if (g_state.remote->initialised != 1)
1604                 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
1605                         __func__, g_state.remote->initialised);
1606
1607         return ((g_state.remote != NULL) &&
1608                 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1609 }
1610
1611 /*
1612  * Autosuspend related functionality
1613  */
1614
1615 int
1616 vchiq_videocore_wanted(VCHIQ_STATE_T *state)
1617 {
1618         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1619         if (!arm_state)
1620                 /* autosuspend not supported - always return wanted */
1621                 return 1;
1622         else if (arm_state->blocked_count)
1623                 return 1;
1624         else if (!arm_state->videocore_use_count) {
1625                 /* usage count zero - check for override unless we're forcing */
1626                 if (arm_state->resume_blocked)
1627                         return 0;
1628                 else
1629                         return vchiq_platform_videocore_wanted(state);
1630         } else
1631                 /* non-zero usage count - videocore still required */
1632                 return 1;
1633 }
1634
1635 static VCHIQ_STATUS_T
1636 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
1637         VCHIQ_HEADER_T *header,
1638         VCHIQ_SERVICE_HANDLE_T service_user,
1639         void *bulk_user)
1640 {
1641         vchiq_log_error(vchiq_susp_log_level,
1642                 "%s callback reason %d", __func__, reason);
1643         return 0;
1644 }
1645
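/*
 * Keepalive thread.  vchiq_on_remote_use() and vchiq_on_remote_release()
 * (later in this file) increment ka_use_count/ka_release_count and signal
 * ka_evt; this thread drains those counters and converts them into
 * vchiq_use_service()/vchiq_release_service() calls on the "KEEP" service,
 * keeping the ARM-side use counts in step with VideoCore's requests.
 */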
1646 static int
1647 vchiq_keepalive_thread_func(void *v)
1648 {
1649         VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
1650         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1651
1652         VCHIQ_STATUS_T status;
1653         VCHIQ_INSTANCE_T instance;
1654         VCHIQ_SERVICE_HANDLE_T ka_handle;
1655
1656         VCHIQ_SERVICE_PARAMS_T params = {
1657                 .fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1658                 .callback    = vchiq_keepalive_vchiq_callback,
1659                 .version     = KEEPALIVE_VER,
1660                 .version_min = KEEPALIVE_VER_MIN
1661         };
1662
1663         status = vchiq_initialise(&instance);
1664         if (status != VCHIQ_SUCCESS) {
1665                 vchiq_log_error(vchiq_susp_log_level,
1666                         "%s vchiq_initialise failed %d", __func__, status);
1667                 goto exit;
1668         }
1669
1670         status = vchiq_connect(instance);
1671         if (status != VCHIQ_SUCCESS) {
1672                 vchiq_log_error(vchiq_susp_log_level,
1673                         "%s vchiq_connect failed %d", __func__, status);
1674                 goto shutdown;
1675         }
1676
1677         status = vchiq_add_service(instance, &params, &ka_handle);
1678         if (status != VCHIQ_SUCCESS) {
1679                 vchiq_log_error(vchiq_susp_log_level,
1680                         "%s vchiq_open_service failed %d", __func__, status);
1681                 goto shutdown;
1682         }
1683
1684         while (1) {
1685                 long rc = 0, uc = 0;
1686                 if (wait_for_completion_interruptible(&arm_state->ka_evt)
1687                                 != 0) {
1688                         vchiq_log_error(vchiq_susp_log_level,
1689                                 "%s interrupted", __func__);
1690                         flush_signals(current);
1691                         continue;
1692                 }
1693
1694                 /* read and clear counters.  Do release_count then use_count to
1695                  * prevent getting more releases than uses */
1696                 rc = atomic_xchg(&arm_state->ka_release_count, 0);
1697                 uc = atomic_xchg(&arm_state->ka_use_count, 0);
1698
1699                 /* Call use/release service the requisite number of times.
1700                  * Process use before release so use counts don't go negative */
1701                 while (uc--) {
1702                         atomic_inc(&arm_state->ka_use_ack_count);
1703                         status = vchiq_use_service(ka_handle);
1704                         if (status != VCHIQ_SUCCESS) {
1705                                 vchiq_log_error(vchiq_susp_log_level,
1706                                         "%s vchiq_use_service error %d",
1707                                         __func__, status);
1708                         }
1709                 }
1710                 while (rc--) {
1711                         status = vchiq_release_service(ka_handle);
1712                         if (status != VCHIQ_SUCCESS) {
1713                                 vchiq_log_error(vchiq_susp_log_level,
1714                                         "%s vchiq_release_service error %d",
1715                                         __func__, status);
1716                         }
1717                 }
1718         }
1719
1720 shutdown:
1721         vchiq_shutdown(instance);
1722 exit:
1723         return 0;
1724 }
1725
1726 VCHIQ_STATUS_T
1727 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
1728 {
1729         VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
1730
1731         if (arm_state) {
1732                 rwlock_init(&arm_state->susp_res_lock);
1733
1734                 init_completion(&arm_state->ka_evt);
1735                 atomic_set(&arm_state->ka_use_count, 0);
1736                 atomic_set(&arm_state->ka_use_ack_count, 0);
1737                 atomic_set(&arm_state->ka_release_count, 0);
1738
1739                 init_completion(&arm_state->vc_suspend_complete);
1740
1741                 init_completion(&arm_state->vc_resume_complete);
1742                 /* Initialise to 'done' state.  We only want to block on resume
1743                  * completion while videocore is suspended. */
1744                 set_resume_state(arm_state, VC_RESUME_RESUMED);
1745
1746                 init_completion(&arm_state->resume_blocker);
1747                 /* Initialise to 'done' state.  We only want to block on this
1748                  * completion while resume is blocked */
1749                 complete_all(&arm_state->resume_blocker);
1750
1751                 init_completion(&arm_state->blocked_blocker);
1752                 /* Initialise to 'done' state.  We only want to block on this
1753                  * completion while things are waiting on the resume blocker */
1754                 complete_all(&arm_state->blocked_blocker);
1755
1756                 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
1757                 arm_state->suspend_timer_running = 0;
1758                 init_timer(&arm_state->suspend_timer);
1759                 arm_state->suspend_timer.data = (unsigned long)(state);
1760                 arm_state->suspend_timer.function = suspend_timer_callback;
1761
1762                 arm_state->first_connect = 0;
1763
1764         }
1765         return status;
1766 }
1767
1768 /*
1769 ** Functions to modify the state variables:
1770 **      set_suspend_state
1771 **      set_resume_state
1772 **
1773 ** There are more state variables than we might like, so ensure they remain in
1774 ** step.  Suspend and resume state are maintained separately, since most of
1775 ** these state machines can operate independently.  However, there are a few
1776 ** states where state transitions in one state machine cause a reset to the
1777 ** other state machine.  In addition, there are some completion events which
1778 ** need to occur on state machine reset and end-state(s), so these are also
1779 ** dealt with in these functions.
1780 **
1781 ** In all states we set the state variable according to the input, but in some
1782 ** cases we perform additional steps outlined below:
1783 **
1784 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
1785 **                      The suspend completion is completed after any suspend
1786 **                      attempt.  When we reset the state machine we also reset
1787 **                      the completion.  This reset occurs when videocore is
1788 **                      resumed, and also if we initiate suspend after a suspend
1789 **                      failure.
1790 **
1791 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
1792 **                      suspend - ie from this point on we must try to suspend
1793 **                      before resuming can occur.  We therefore also reset the
1794 **                      resume state machine to VC_RESUME_IDLE in this state.
1795 **
1796 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
1797 **                      complete_all on the suspend completion to notify
1798 **                      anything waiting for suspend to happen.
1799 **
1800 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
1801 **                      initiate resume, so no need to alter resume state.
1802 **                      We call complete_all on the suspend completion to notify
1803 **                      of suspend rejection.
1804 **
1805 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
1806 **                      suspend completion and reset the resume state machine.
1807 **
1808 ** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
1809 **                      resume completion is in its 'done' state whenever
1810 **                      videcore is running.  Therfore, the VC_RESUME_IDLE state
1811 **                      implies that videocore is suspended.
1812 **                      Hence, any thread which needs to wait until videocore is
1813 **                      running can wait on this completion - it will only block
1814 **                      if videocore is suspended.
1815 **
1816 ** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
1817 **                      Call complete_all on the resume completion to unblock
1818 **                      any threads waiting for resume.  Also reset the suspend
1819 **                      state machine to its idle state.
1820 **
1821 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
1822 */
1823
1824 void
1825 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
1826         enum vc_suspend_status new_state)
1827 {
1828         /* set the state in all cases */
1829         arm_state->vc_suspend_state = new_state;
1830
1831         /* state specific additional actions */
1832         switch (new_state) {
1833         case VC_SUSPEND_FORCE_CANCELED:
1834                 complete_all(&arm_state->vc_suspend_complete);
1835                 break;
1836         case VC_SUSPEND_REJECTED:
1837                 complete_all(&arm_state->vc_suspend_complete);
1838                 break;
1839         case VC_SUSPEND_FAILED:
1840                 complete_all(&arm_state->vc_suspend_complete);
1841                 arm_state->vc_resume_state = VC_RESUME_RESUMED;
1842                 complete_all(&arm_state->vc_resume_complete);
1843                 break;
1844         case VC_SUSPEND_IDLE:
1845                 /* TODO: reinit_completion */
1846                 INIT_COMPLETION(arm_state->vc_suspend_complete);
1847                 break;
1848         case VC_SUSPEND_REQUESTED:
1849                 break;
1850         case VC_SUSPEND_IN_PROGRESS:
1851                 set_resume_state(arm_state, VC_RESUME_IDLE);
1852                 break;
1853         case VC_SUSPEND_SUSPENDED:
1854                 complete_all(&arm_state->vc_suspend_complete);
1855                 break;
1856         default:
1857                 BUG();
1858                 break;
1859         }
1860 }
1861
1862 void
1863 set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
1864         enum vc_resume_status new_state)
1865 {
1866         /* set the state in all cases */
1867         arm_state->vc_resume_state = new_state;
1868
1869         /* state specific additional actions */
1870         switch (new_state) {
1871         case VC_RESUME_FAILED:
1872                 break;
1873         case VC_RESUME_IDLE:
1874                 /* TODO: reinit_completion */
1875                 INIT_COMPLETION(arm_state->vc_resume_complete);
1876                 break;
1877         case VC_RESUME_REQUESTED:
1878                 break;
1879         case VC_RESUME_IN_PROGRESS:
1880                 break;
1881         case VC_RESUME_RESUMED:
1882                 complete_all(&arm_state->vc_resume_complete);
1883                 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
1884                 break;
1885         default:
1886                 BUG();
1887                 break;
1888         }
1889 }
1890
1891
1892 /* should be called with the write lock held */
1893 inline void
1894 start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1895 {
1896         del_timer(&arm_state->suspend_timer);
1897         arm_state->suspend_timer.expires = jiffies +
1898                 msecs_to_jiffies(arm_state->
1899                         suspend_timer_timeout);
1900         add_timer(&arm_state->suspend_timer);
1901         arm_state->suspend_timer_running = 1;
1902 }
1903
1904 /* should be called with the write lock held */
1905 static inline void
1906 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
1907 {
1908         if (arm_state->suspend_timer_running) {
1909                 del_timer(&arm_state->suspend_timer);
1910                 arm_state->suspend_timer_running = 0;
1911         }
1912 }
1913
1914 static inline int
1915 need_resume(VCHIQ_STATE_T *state)
1916 {
1917         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
1918         return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
1919                         (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
1920                         vchiq_videocore_wanted(state);
1921 }
1922
1923 static int
1924 block_resume(VCHIQ_ARM_STATE_T *arm_state)
1925 {
1926         int status = VCHIQ_SUCCESS;
1927         const unsigned long timeout_val =
1928                                 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
1929         int resume_count = 0;
1930
1931         /* Allow any threads which were blocked by the last force suspend to
1932          * complete if they haven't already.  Only give this one shot; if
1933          * blocked_count is incremented after blocked_blocker is completed
1934          * (which only happens when blocked_count hits 0) then those threads
1935          * will have to wait until next time around */
1936         if (arm_state->blocked_count) {
1937                 /* TODO: reinit_completion */
1938                 INIT_COMPLETION(arm_state->blocked_blocker);
1939                 write_unlock_bh(&arm_state->susp_res_lock);
1940                 vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
1941                         "blocked clients", __func__);
1942                 if (wait_for_completion_interruptible_timeout(
1943                                 &arm_state->blocked_blocker, timeout_val)
1944                                         <= 0) {
1945                         vchiq_log_error(vchiq_susp_log_level, "%s wait for "
1946                                 "previously blocked clients failed" , __func__);
1947                         status = VCHIQ_ERROR;
1948                         write_lock_bh(&arm_state->susp_res_lock);
1949                         goto out;
1950                 }
1951                 vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
1952                         "clients resumed", __func__);
1953                 write_lock_bh(&arm_state->susp_res_lock);
1954         }
1955
1956         /* We need to wait for resume to complete if it's in progress */
1957         while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
1958                         arm_state->vc_resume_state > VC_RESUME_IDLE) {
1959                 if (resume_count > 1) {
1960                         status = VCHIQ_ERROR;
1961                         vchiq_log_error(vchiq_susp_log_level, "%s waited too "
1962                                 "many times for resume" , __func__);
1963                         goto out;
1964                 }
1965                 write_unlock_bh(&arm_state->susp_res_lock);
1966                 vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
1967                         __func__);
1968                 if (wait_for_completion_interruptible_timeout(
1969                                 &arm_state->vc_resume_complete, timeout_val)
1970                                         <= 0) {
1971                         vchiq_log_error(vchiq_susp_log_level, "%s wait for "
1972                                 "resume failed (%s)", __func__,
1973                                 resume_state_names[arm_state->vc_resume_state +
1974                                                         VC_RESUME_NUM_OFFSET]);
1975                         status = VCHIQ_ERROR;
1976                         write_lock_bh(&arm_state->susp_res_lock);
1977                         goto out;
1978                 }
1979                 vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
1980                 write_lock_bh(&arm_state->susp_res_lock);
1981                 resume_count++;
1982         }
1983         /* TODO: reinit_completion */
1984         INIT_COMPLETION(arm_state->resume_blocker);
1985         arm_state->resume_blocked = 1;
1986
1987 out:
1988         return status;
1989 }
1990
1991 static inline void
1992 unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
1993 {
1994         complete_all(&arm_state->resume_blocker);
1995         arm_state->resume_blocked = 0;
1996 }
1997
1998 /* Initiate suspend via slot handler. Should be called with the write lock
1999  * held */
2000 VCHIQ_STATUS_T
2001 vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
2002 {
2003         VCHIQ_STATUS_T status = VCHIQ_ERROR;
2004         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2005
2006         if (!arm_state)
2007                 goto out;
2008
2009         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2010         status = VCHIQ_SUCCESS;
2011
2012
2013         switch (arm_state->vc_suspend_state) {
2014         case VC_SUSPEND_REQUESTED:
2015                 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
2016                         "requested", __func__);
2017                 break;
2018         case VC_SUSPEND_IN_PROGRESS:
2019                 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
2020                         "progress", __func__);
2021                 break;
2022
2023         default:
2024                 /* We don't expect to be in other states, so log but continue
2025                  * anyway */
2026                 vchiq_log_error(vchiq_susp_log_level,
2027                         "%s unexpected suspend state %s", __func__,
2028                         suspend_state_names[arm_state->vc_suspend_state +
2029                                                 VC_SUSPEND_NUM_OFFSET]);
2030                 /* fall through */
2031         case VC_SUSPEND_REJECTED:
2032         case VC_SUSPEND_FAILED:
2033                 /* Ensure any idle state actions have been run */
2034                 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2035                 /* fall through */
2036         case VC_SUSPEND_IDLE:
2037                 vchiq_log_info(vchiq_susp_log_level,
2038                         "%s: suspending", __func__);
2039                 set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
2040                 /* kick the slot handler thread to initiate suspend */
2041                 request_poll(state, NULL, 0);
2042                 break;
2043         }
2044
2045 out:
2046         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2047         return status;
2048 }
2049
2050 void
2051 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
2052 {
2053         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2054         int susp = 0;
2055
2056         if (!arm_state)
2057                 goto out;
2058
2059         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2060
2061         write_lock_bh(&arm_state->susp_res_lock);
2062         if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2063                         arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2064                 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2065                 susp = 1;
2066         }
2067         write_unlock_bh(&arm_state->susp_res_lock);
2068
2069         if (susp)
2070                 vchiq_platform_suspend(state);
2071
2072 out:
2073         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2074         return;
2075 }
2076
2077
2078 static void
2079 output_timeout_error(VCHIQ_STATE_T *state)
2080 {
2081         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2082         char service_err[50] = "";
2083         int vc_use_count = arm_state->videocore_use_count;
2084         int active_services = state->unused_service;
2085         int i;
2086
2087         if (!arm_state->videocore_use_count) {
2088                 snprintf(service_err, 50, " Videocore usecount is 0");
2089                 goto output_msg;
2090         }
2091         for (i = 0; i < active_services; i++) {
2092                 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2093                 if (service_ptr && service_ptr->service_use_count &&
2094                         (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
2095                         snprintf(service_err, 50, " %c%c%c%c(%8x) service has "
2096                                 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
2097                                         service_ptr->base.fourcc),
2098                                  service_ptr->client_id,
2099                                  service_ptr->service_use_count,
2100                                  service_ptr->service_use_count ==
2101                                          vc_use_count ? "" : " (+ more)");
2102                         break;
2103                 }
2104         }
2105
2106 output_msg:
2107         vchiq_log_error(vchiq_susp_log_level,
2108                 "timed out waiting for vc suspend (%d).%s",
2109                  arm_state->autosuspend_override, service_err);
2110
2111 }
2112
2113 /* Try to get videocore into suspended state, regardless of autosuspend state.
2114 ** We don't actually force suspend, since videocore may get into a bad state
2115 ** if we force suspend at a bad time.  Instead, we wait for autosuspend to
2116 ** determine a good point to suspend.  If this doesn't happen within 100ms we
2117 ** report failure.
2118 **
2119 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
2120 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
2121 */
2122 VCHIQ_STATUS_T
2123 vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
2124 {
2125         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2126         VCHIQ_STATUS_T status = VCHIQ_ERROR;
2127         long rc = 0;
2128         int repeat = -1;
2129
2130         if (!arm_state)
2131                 goto out;
2132
2133         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2134
2135         write_lock_bh(&arm_state->susp_res_lock);
2136
2137         status = block_resume(arm_state);
2138         if (status != VCHIQ_SUCCESS)
2139                 goto unlock;
2140         if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2141                 /* Already suspended - just block resume and exit */
2142                 vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
2143                         __func__);
2144                 status = VCHIQ_SUCCESS;
2145                 goto unlock;
2146         } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
2147                 /* initiate suspend immediately in the case that we're waiting
2148                  * for the timeout */
2149                 stop_suspend_timer(arm_state);
2150                 if (!vchiq_videocore_wanted(state)) {
2151                         vchiq_log_info(vchiq_susp_log_level, "%s videocore "
2152                                 "idle, initiating suspend", __func__);
2153                         status = vchiq_arm_vcsuspend(state);
2154                 } else if (arm_state->autosuspend_override <
2155                                                 FORCE_SUSPEND_FAIL_MAX) {
2156                         vchiq_log_info(vchiq_susp_log_level, "%s letting "
2157                                 "videocore go idle", __func__);
2158                         status = VCHIQ_SUCCESS;
2159                 } else {
2160                         vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
2161                                 "many times - attempting suspend", __func__);
2162                         status = vchiq_arm_vcsuspend(state);
2163                 }
2164         } else {
2165                 vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
2166                         "in progress - wait for completion", __func__);
2167                 status = VCHIQ_SUCCESS;
2168         }
2169
2170         /* Wait for suspend to happen due to system idle (not forced..) */
2171         if (status != VCHIQ_SUCCESS)
2172                 goto unblock_resume;
2173
2174         do {
2175                 write_unlock_bh(&arm_state->susp_res_lock);
2176
2177                 rc = wait_for_completion_interruptible_timeout(
2178                                 &arm_state->vc_suspend_complete,
2179                                 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
2180
2181                 write_lock_bh(&arm_state->susp_res_lock);
2182                 if (rc < 0) {
2183                         vchiq_log_warning(vchiq_susp_log_level, "%s "
2184                                 "interrupted waiting for suspend", __func__);
2185                         status = VCHIQ_ERROR;
2186                         goto unblock_resume;
2187                 } else if (rc == 0) {
2188                         if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
2189                                 /* Repeat timeout once if in progress */
2190                                 if (repeat < 0) {
2191                                         repeat = 1;
2192                                         continue;
2193                                 }
2194                         }
2195                         arm_state->autosuspend_override++;
2196                         output_timeout_error(state);
2197
2198                         status = VCHIQ_RETRY;
2199                         goto unblock_resume;
2200                 }
2201         } while (0 < (repeat--));
2202
2203         /* Check and report state in case we need to abort ARM suspend */
2204         if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
2205                 status = VCHIQ_RETRY;
2206                 vchiq_log_error(vchiq_susp_log_level,
2207                         "%s videocore suspend failed (state %s)", __func__,
2208                         suspend_state_names[arm_state->vc_suspend_state +
2209                                                 VC_SUSPEND_NUM_OFFSET]);
2210                 /* Reset the state only if it's still in an error state.
2211                  * Something could have already initiated another suspend. */
2212                 if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
2213                         set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2214
2215                 goto unblock_resume;
2216         }
2217
2218         /* successfully suspended - unlock and exit */
2219         goto unlock;
2220
2221 unblock_resume:
2222         /* all error states need to unblock resume before exit */
2223         unblock_resume(arm_state);
2224
2225 unlock:
2226         write_unlock_bh(&arm_state->susp_res_lock);
2227
2228 out:
2229         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2230         return status;
2231 }
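
/*
 * Illustrative caller sketch (editorial addition; the platform suspend code
 * that calls this lives outside this file).  Per the comment above, callers
 * are expected to branch on the three documented return values:
 *
 *      switch (vchiq_arm_force_suspend(state)) {
 *      case VCHIQ_SUCCESS:
 *              // VideoCore suspended - proceed with ARM suspend
 *              break;
 *      case VCHIQ_RETRY:
 *              // VideoCore did not suspend in time - retry later
 *              break;
 *      default:
 *              // interrupted/error - abort the suspend attempt
 *              break;
 *      }
 */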
2232
2233 void
2234 vchiq_check_suspend(VCHIQ_STATE_T *state)
2235 {
2236         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2237
2238         if (!arm_state)
2239                 goto out;
2240
2241         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2242
2243         write_lock_bh(&arm_state->susp_res_lock);
2244         if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2245                         arm_state->first_connect &&
2246                         !vchiq_videocore_wanted(state)) {
2247                 vchiq_arm_vcsuspend(state);
2248         }
2249         write_unlock_bh(&arm_state->susp_res_lock);
2250
2251 out:
2252         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2253         return;
2254 }
2255
2256
2257 int
2258 vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
2259 {
2260         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2261         int resume = 0;
2262         int ret = -1;
2263
2264         if (!arm_state)
2265                 goto out;
2266
2267         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2268
2269         write_lock_bh(&arm_state->susp_res_lock);
2270         unblock_resume(arm_state);
2271         resume = vchiq_check_resume(state);
2272         write_unlock_bh(&arm_state->susp_res_lock);
2273
2274         if (resume) {
2275                 if (wait_for_completion_interruptible(
2276                         &arm_state->vc_resume_complete) < 0) {
2277                         vchiq_log_error(vchiq_susp_log_level,
2278                                 "%s interrupted", __func__);
2279                         /* failed, cannot accurately derive suspend
2280                          * state, so exit early. */
2281                         goto out;
2282                 }
2283         }
2284
2285         read_lock_bh(&arm_state->susp_res_lock);
2286         if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
2287                 vchiq_log_info(vchiq_susp_log_level,
2288                                 "%s: Videocore remains suspended", __func__);
2289         } else {
2290                 vchiq_log_info(vchiq_susp_log_level,
2291                                 "%s: Videocore resumed", __func__);
2292                 ret = 0;
2293         }
2294         read_unlock_bh(&arm_state->susp_res_lock);
2295 out:
2296         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2297         return ret;
2298 }
2299
2300 /* This function should be called with the write lock held */
2301 int
2302 vchiq_check_resume(VCHIQ_STATE_T *state)
2303 {
2304         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2305         int resume = 0;
2306
2307         if (!arm_state)
2308                 goto out;
2309
2310         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2311
2312         if (need_resume(state)) {
2313                 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2314                 request_poll(state, NULL, 0);
2315                 resume = 1;
2316         }
2317
2318 out:
2319         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2320         return resume;
2321 }
2322
2323 #ifdef notyet
2324 void
2325 vchiq_platform_check_resume(VCHIQ_STATE_T *state)
2326 {
2327         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2328         int res = 0;
2329
2330         if (!arm_state)
2331                 goto out;
2332
2333         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2334
2335         write_lock_bh(&arm_state->susp_res_lock);
2336         if (arm_state->wake_address == 0) {
2337                 vchiq_log_info(vchiq_susp_log_level,
2338                                         "%s: already awake", __func__);
2339                 goto unlock;
2340         }
2341         if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
2342                 vchiq_log_info(vchiq_susp_log_level,
2343                                         "%s: already resuming", __func__);
2344                 goto unlock;
2345         }
2346
2347         if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
2348                 set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
2349                 res = 1;
2350         } else
2351                 vchiq_log_trace(vchiq_susp_log_level,
2352                                 "%s: not resuming (resume state %s)", __func__,
2353                                 resume_state_names[arm_state->vc_resume_state +
2354                                                         VC_RESUME_NUM_OFFSET]);
2355
2356 unlock:
2357         write_unlock_bh(&arm_state->susp_res_lock);
2358
2359         if (res)
2360                 vchiq_platform_resume(state);
2361
2362 out:
2363         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2364         return;
2365
2366 }
2367 #endif
2368
2369
2370
2371 VCHIQ_STATUS_T
2372 vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
2373                 enum USE_TYPE_E use_type)
2374 {
2375         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2376         VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2377         char entity[16];
2378         int *entity_uc;
2379         int local_uc, local_entity_uc;
2380
2381         if (!arm_state)
2382                 goto out;
2383
2384         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2385
2386         if (use_type == USE_TYPE_VCHIQ) {
2387                 snprintf(entity, sizeof(entity), "VCHIQ:   ");
2388                 entity_uc = &arm_state->peer_use_count;
2389         } else if (service) {
2390                 snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
2391                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2392                         service->client_id);
2393                 entity_uc = &service->service_use_count;
2394         } else {
2395                 vchiq_log_error(vchiq_susp_log_level, "%s null service "
2396                                 "ptr", __func__);
2397                 ret = VCHIQ_ERROR;
2398                 goto out;
2399         }
2400
2401         write_lock_bh(&arm_state->susp_res_lock);
2402         while (arm_state->resume_blocked) {
2403                 /* If we call 'use' while force suspend is waiting for suspend,
2404                  * then we're about to block the thread which the force is
2405                  * waiting to complete, so we're bound to just time out. In this
2406                  * case, set the suspend state such that the wait will be
2407                  * canceled, so we can complete as quickly as possible. */
2408                 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2409                                 VC_SUSPEND_IDLE) {
2410                         set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2411                         break;
2412                 }
2413                 /* If suspend is already in progress then we need to block */
2414                 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2415                         /* Indicate that there are threads waiting on the resume
2416                          * blocker.  These need to be allowed to complete before
2417                          * a _second_ call to force suspend can complete,
2418                          * otherwise low priority threads might never actually
2419                          * continue */
2420                         arm_state->blocked_count++;
2421                         write_unlock_bh(&arm_state->susp_res_lock);
2422                         vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2423                                 "blocked - waiting...", __func__, entity);
2424                         if (wait_for_completion_killable(
2425                                         &arm_state->resume_blocker) != 0) {
2426                                 vchiq_log_error(vchiq_susp_log_level, "%s %s "
2427                                         "wait for resume blocker interrupted",
2428                                         __func__, entity);
2429                                 ret = VCHIQ_ERROR;
2430                                 write_lock_bh(&arm_state->susp_res_lock);
2431                                 arm_state->blocked_count--;
2432                                 write_unlock_bh(&arm_state->susp_res_lock);
2433                                 goto out;
2434                         }
2435                         vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2436                                 "unblocked", __func__, entity);
2437                         write_lock_bh(&arm_state->susp_res_lock);
2438                         if (--arm_state->blocked_count == 0)
2439                                 complete_all(&arm_state->blocked_blocker);
2440                 }
2441         }
2442
2443         stop_suspend_timer(arm_state);
2444
2445         local_uc = ++arm_state->videocore_use_count;
2446         local_entity_uc = ++(*entity_uc);
2447
2448         /* If there's a pending request which hasn't yet been serviced then
2449          * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
2450          * vc_resume_complete will block until we either resume or fail to
2451          * suspend */
2452         if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2453                 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2454
2455         if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2456                 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2457                 vchiq_log_info(vchiq_susp_log_level,
2458                         "%s %s count %d, state count %d",
2459                         __func__, entity, local_entity_uc, local_uc);
2460                 request_poll(state, NULL, 0);
2461         } else
2462                 vchiq_log_trace(vchiq_susp_log_level,
2463                         "%s %s count %d, state count %d",
2464                         __func__, entity, *entity_uc, local_uc);
2465
2466
2467         write_unlock_bh(&arm_state->susp_res_lock);
2468
2469         /* Completion is in a done state when we're not suspended, so this won't
2470          * block for the non-suspended case. */
2471         if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
2472                 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
2473                         __func__, entity);
2474                 if (wait_for_completion_killable(
2475                                 &arm_state->vc_resume_complete) != 0) {
2476                         vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
2477                                 "resume interrupted", __func__, entity);
2478                         ret = VCHIQ_ERROR;
2479                         goto out;
2480                 }
2481                 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
2482                         entity);
2483         }
2484
2485         if (ret == VCHIQ_SUCCESS) {
2486                 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2487                 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2488                 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2489                         /* Send the use notify to videocore */
2490                         status = vchiq_send_remote_use_active(state);
2491                         if (status == VCHIQ_SUCCESS)
2492                                 ack_cnt--;
2493                         else
2494                                 atomic_add(ack_cnt,
2495                                         &arm_state->ka_use_ack_count);
2496                 }
2497         }
2498
2499 out:
2500         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2501         return ret;
2502 }
2503
2504 VCHIQ_STATUS_T
2505 vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
2506 {
2507         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2508         VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2509         char entity[16];
2510         int *entity_uc;
2511
2512         if (!arm_state)
2513                 goto out;
2514
2515         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2516
2517         if (service) {
2518                 snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
2519                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2520                         service->client_id);
2521                 entity_uc = &service->service_use_count;
2522         } else {
2523                 snprintf(entity, sizeof(entity), "PEER:   ");
2524                 entity_uc = &arm_state->peer_use_count;
2525         }
2526
2527         write_lock_bh(&arm_state->susp_res_lock);
2528         if (!arm_state->videocore_use_count || !(*entity_uc)) {
2529                 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2530                 WARN_ON(!arm_state->videocore_use_count);
2531                 WARN_ON(!(*entity_uc));
2532                 ret = VCHIQ_ERROR;
2533                 goto unlock;
2534         }
2535         --arm_state->videocore_use_count;
2536         --(*entity_uc);
2537
2538         if (!vchiq_videocore_wanted(state)) {
2539                 if (vchiq_platform_use_suspend_timer() &&
2540                                 !arm_state->resume_blocked) {
2541                         /* Only use the timer if we're not trying to force
2542                          * suspend (=> resume_blocked) */
2543                         start_suspend_timer(arm_state);
2544                 } else {
2545                         vchiq_log_info(vchiq_susp_log_level,
2546                                 "%s %s count %d, state count %d - suspending",
2547                                 __func__, entity, *entity_uc,
2548                                 arm_state->videocore_use_count);
2549                         vchiq_arm_vcsuspend(state);
2550                 }
2551         } else
2552                 vchiq_log_trace(vchiq_susp_log_level,
2553                         "%s %s count %d, state count %d",
2554                         __func__, entity, *entity_uc,
2555                         arm_state->videocore_use_count);
2556
2557 unlock:
2558         write_unlock_bh(&arm_state->susp_res_lock);
2559
2560 out:
2561         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2562         return ret;
2563 }
2564
2565 void
2566 vchiq_on_remote_use(VCHIQ_STATE_T *state)
2567 {
2568         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2569         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2570         atomic_inc(&arm_state->ka_use_count);
2571         complete(&arm_state->ka_evt);
2572 }
2573
2574 void
2575 vchiq_on_remote_release(VCHIQ_STATE_T *state)
2576 {
2577         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2578         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2579         atomic_inc(&arm_state->ka_release_count);
2580         complete(&arm_state->ka_evt);
2581 }
2582
2583 VCHIQ_STATUS_T
2584 vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
2585 {
2586         return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2587 }
2588
2589 VCHIQ_STATUS_T
2590 vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
2591 {
2592         return vchiq_release_internal(service->state, service);
2593 }
2594
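/*
 * Suspend timer handler: when the timer armed after the last use count was
 * dropped expires, re-check whether the videocore can now be suspended.
 */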
2595 static void suspend_timer_callback(unsigned long context)
2596 {
2597         VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
2598         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2599         if (!arm_state)
2600                 goto out;
2601         vchiq_log_info(vchiq_susp_log_level,
2602                 "%s - suspend timer expired - check suspend", __func__);
2603         vchiq_check_suspend(state);
2604 out:
2605         return;
2606 }
2607
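/*
 * Take a use count on a service by handle without forcing a videocore
 * resume (USE_TYPE_SERVICE_NO_RESUME).
 */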
2608 VCHIQ_STATUS_T
2609 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
2610 {
2611         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2612         VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2613         if (service) {
2614                 ret = vchiq_use_internal(service->state, service,
2615                                 USE_TYPE_SERVICE_NO_RESUME);
2616                 unlock_service(service);
2617         }
2618         return ret;
2619 }
2620
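/*
 * Public handle-based use/release API: look the service up, take or drop a
 * use count, then drop the reference taken by find_service_by_handle().
 */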
2621 VCHIQ_STATUS_T
2622 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2623 {
2624         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2625         VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2626         if (service) {
2627                 ret = vchiq_use_internal(service->state, service,
2628                                 USE_TYPE_SERVICE);
2629                 unlock_service(service);
2630         }
2631         return ret;
2632 }
2633
2634 VCHIQ_STATUS_T
2635 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2636 {
2637         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2638         VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
2639         if (service) {
2640                 ret = vchiq_release_internal(service->state, service);
2641                 unlock_service(service);
2642         }
2643         return ret;
2644 }
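/*
 * A minimal caller sketch for the pair above (hypothetical 'handle'
 * variable) - hold a use count only while the service is actually driven:
 *
 *      if (vchiq_use_service(handle) == VCHIQ_SUCCESS) {
 *              ... queue messages / wait for completions ...
 *              vchiq_release_service(handle);
 *      }
 */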
2645
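/*
 * Log a snapshot of the suspend/resume state and per-service use counts.
 * The snapshot is taken under susp_res_lock and logged after the lock is
 * dropped.  With more than local_max_services active services, only those
 * with a non-zero use count are dumped.
 */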
2646 void
2647 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
2648 {
2649         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2650         int i, j = 0;
2651         /* Only dump 64 services */
2652         static const int local_max_services = 64;
2653         /* If there's more than 64 services, only dump ones with
2654          * non-zero counts */
2655         int only_nonzero = 0;
2656         static const char *nz = "<-- preventing suspend";
2657
2658         enum vc_suspend_status vc_suspend_state;
2659         enum vc_resume_status  vc_resume_state;
2660         int peer_count;
2661         int vc_use_count;
2662         int active_services;
2663         struct service_data_struct {
2664                 int fourcc;
2665                 int clientid;
2666                 int use_count;
2667         } service_data[local_max_services];
2668
2669         if (!arm_state)
2670                 return;
2671
2672         read_lock_bh(&arm_state->susp_res_lock);
2673         vc_suspend_state = arm_state->vc_suspend_state;
2674         vc_resume_state  = arm_state->vc_resume_state;
2675         peer_count = arm_state->peer_use_count;
2676         vc_use_count = arm_state->videocore_use_count;
2677         active_services = state->unused_service;
2678         if (active_services > local_max_services)
2679                 only_nonzero = 1;
2680
2681         for (i = 0; (i < active_services) && (j < local_max_services); i++) {
2682                 VCHIQ_SERVICE_T *service_ptr = state->services[i];
2683                 if (!service_ptr)
2684                         continue;
2685
2686                 if (only_nonzero && !service_ptr->service_use_count)
2687                         continue;
2688
2689                 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
2690                         service_data[j].fourcc = service_ptr->base.fourcc;
2691                         service_data[j].clientid = service_ptr->client_id;
2692                         service_data[j++].use_count = service_ptr->
2693                                                         service_use_count;
2694                 }
2695         }
2696
2697         read_unlock_bh(&arm_state->susp_res_lock);
2698
2699         vchiq_log_warning(vchiq_susp_log_level,
2700                 "-- Videocore suspend state: %s --",
2701                 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
2702         vchiq_log_warning(vchiq_susp_log_level,
2703                 "-- Videocore resume state: %s --",
2704                 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
2705
2706         if (only_nonzero)
2707                 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
2708                         "services (%d).  Only dumping up to first %d services "
2709                         "with non-zero use-count", active_services,
2710                         local_max_services);
2711
2712         for (i = 0; i < j; i++) {
2713                 vchiq_log_warning(vchiq_susp_log_level,
2714                         "----- %c%c%c%c:%d service count %d %s",
2715                         VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
2716                         service_data[i].clientid,
2717                         service_data[i].use_count,
2718                         service_data[i].use_count ? nz : "");
2719         }
2720         vchiq_log_warning(vchiq_susp_log_level,
2721                 "----- VCHIQ use count %d", peer_count);
2722         vchiq_log_warning(vchiq_susp_log_level,
2723                 "--- Overall vchiq instance use count %d", vc_use_count);
2724
2725         vchiq_dump_platform_use_state(state);
2726 }
2727
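/*
 * Sanity check used before messaging a service: succeed only if the caller
 * holds at least one use count on it, otherwise log an error and dump the
 * current use-count state.
 */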
2728 VCHIQ_STATUS_T
2729 vchiq_check_service(VCHIQ_SERVICE_T *service)
2730 {
2731         VCHIQ_ARM_STATE_T *arm_state;
2732         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2733
2734         if (!service || !service->state)
2735                 goto out;
2736
2737         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2738
2739         arm_state = vchiq_platform_get_arm_state(service->state);
2740
2741         read_lock_bh(&arm_state->susp_res_lock);
2742         if (service->service_use_count)
2743                 ret = VCHIQ_SUCCESS;
2744         read_unlock_bh(&arm_state->susp_res_lock);
2745
2746         if (ret == VCHIQ_ERROR) {
2747                 vchiq_log_error(vchiq_susp_log_level,
2748                         "%s ERROR - %c%c%c%c:%8x service count %d, "
2749                         "state count %d, videocore suspend state %s", __func__,
2750                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2751                         service->client_id, service->service_use_count,
2752                         arm_state->videocore_use_count,
2753                         suspend_state_names[arm_state->vc_suspend_state +
2754                                                 VC_SUSPEND_NUM_OFFSET]);
2755                 vchiq_dump_service_use_state(service->state);
2756         }
2757 out:
2758         return ret;
2759 }
2760
2761 /* stub functions */
2762 void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
2763 {
2764         (void)state;
2765 }
2766
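/*
 * Connection state hook: on the first transition to CONNECTED, create and
 * start the keep-alive thread for this state.
 */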
2767 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
2768         VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
2769 {
2770         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
2771         vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
2772                 get_conn_state_name(oldstate), get_conn_state_name(newstate));
2773         if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
2774                 write_lock_bh(&arm_state->susp_res_lock);
2775                 if (!arm_state->first_connect) {
2776                         char threadname[16]; /* fits "VCHIQka-" + multi-digit id */
2777                         arm_state->first_connect = 1;
2778                         write_unlock_bh(&arm_state->susp_res_lock);
2779                         snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
2780                                 state->id);
2781                         arm_state->ka_thread = vchiq_thread_create(
2782                                 &vchiq_keepalive_thread_func,
2783                                 (void *)state,
2784                                 threadname);
2785                         if (arm_state->ka_thread == NULL) {
2786                                 vchiq_log_error(vchiq_susp_log_level,
2787                                         "vchiq: FATAL: couldn't create thread %s",
2788                                         threadname);
2789                         } else {
2790                                 wake_up_process(arm_state->ka_thread);
2791                         }
2792                 } else
2793                         write_unlock_bh(&arm_state->susp_res_lock);
2794         }
2795 }
2796
2797 /****************************************************************************
2798 *
2799 *   vchiq_init - called when the module is loaded.
2800 *
2801 ***************************************************************************/
2802
2803 int __init vchiq_init(void);
2804 int __init
2805 vchiq_init(void)
2806 {
2807         int err;
2808
2809 #ifdef notyet
2810         /* create proc entries */
2811         err = vchiq_proc_init();
2812         if (err != 0)
2813                 goto failed_proc_init;
2814 #endif
2815
2816         vchiq_cdev = make_dev(&vchiq_cdevsw, 0,
2817             UID_ROOT, GID_WHEEL, 0600, "vchiq");
2818         if (!vchiq_cdev) {
2819                 printf("Failed to create /dev/vchiq\n");
2820                 return (-ENXIO);
2821         }
2822
2823         spin_lock_init(&msg_queue_spinlock);
2824
2825         err = vchiq_platform_init(&g_state);
2826         if (err != 0)
2827                 goto failed_platform_init;
2828
2829         vchiq_log_info(vchiq_arm_log_level,
2830                 "vchiq: initialised - version %d (min %d)",
2831                 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
2832
2833         return 0;
2834
2835 failed_platform_init:
2836         if (vchiq_cdev) {
2837                 destroy_dev(vchiq_cdev);
2838                 vchiq_cdev = NULL;
2839         }
2840         vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
2841         return err;
2842 }
2843
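/*
 * Linux procfs plumbing (per-process use counts); compiled out on FreeBSD
 * while the 'notyet' guard is undefined.
 */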
2844 #ifdef notyet
2845 static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2846 {
2847         VCHIQ_SERVICE_T *service;
2848         int use_count = 0, i;
2849         i = 0;
2850         while ((service = next_service_by_instance(instance->state,
2851                 instance, &i)) != NULL) {
2852                 use_count += service->service_use_count;
2853                 unlock_service(service);
2854         }
2855         return use_count;
2856 }
2857
2858 /* read the per-process use-count */
2859 static int proc_read_use_count(char *page, char **start,
2860                                off_t off, int count,
2861                                int *eof, void *data)
2862 {
2863         VCHIQ_INSTANCE_T instance = data;
2864         int len, use_count;
2865
2866         use_count = vchiq_instance_get_use_count(instance);
2867         len = snprintf(page+off, count, "%d\n", use_count);
2868
2869         return len;
2870 }
2871
2872 /* add an instance (process) to the proc entries */
2873 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
2874 {
2875         char pidstr[32];
2876         struct proc_dir_entry *top, *use_count;
2877         struct proc_dir_entry *clients = vchiq_clients_top();
2878         int pid = instance->pid;
2879
2880         snprintf(pidstr, sizeof(pidstr), "%d", pid);
2881         top = proc_mkdir(pidstr, clients);
2882         if (!top)
2883                 goto fail_top;
2884
2885         use_count = create_proc_read_entry("use_count",
2886                                            0444, top,
2887                                            proc_read_use_count,
2888                                            instance);
2889         if (!use_count)
2890                 goto fail_use_count;
2891
2892         instance->proc_entry = top;
2893
2894         return 0;
2895
2896 fail_use_count:
2897         remove_proc_entry(top->name, clients);
2898 fail_top:
2899         return -ENOMEM;
2900 }
2901
2902 static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
2903 {
2904         struct proc_dir_entry *clients = vchiq_clients_top();
2905         remove_proc_entry("use_count", instance->proc_entry);
2906         remove_proc_entry(instance->proc_entry->name, clients);
2907 }
2908
2909 #endif
2910
2911 /****************************************************************************
2912 *
2913 *   vchiq_exit - called when the module is unloaded.
2914 *
2915 ***************************************************************************/
2916
2917 void vchiq_exit(void);
2918 void
2919 vchiq_exit(void)
2920 {
2921         if (vchiq_ehtag != NULL)
2922                 EVENTHANDLER_DEREGISTER(dev_clone, vchiq_ehtag);
2923         vchiq_ehtag = NULL;
2924
2925         vchiq_platform_exit(&g_state);
2926         if (vchiq_cdev) {
2927                 destroy_dev(vchiq_cdev);
2928                 vchiq_cdev = NULL;
2929         }
2930 }