2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/types.h>
32 #include <sys/socket.h>
34 #include <sys/event.h>
45 #include "mp_ws_query.h"
46 #include "mp_rs_query.h"
47 #include "singletons.h"
/*
 * NOTE(review): this extract appears to be FreeBSD nscd's query-processing
 * module. Every line carries a stray embedded line number and many original
 * lines (return types, braces, break/return statements) are missing --
 * restore from the upstream source before attempting to compile.
 */
/* One-byte placeholder value written to the cache for negative (not-found) results. */
49 static const char negative_data[1] = { 0 };
/* Provided elsewhere; fills in the current time (used for query timestamps). */
51 extern void get_time_func(struct timeval *);
/* Cache clearing: full clear vs. partial clear of entries owned by one eid string. */
53 static void clear_config_entry(struct configuration_entry *);
54 static void clear_config_entry_part(struct configuration_entry *,
55 const char *, size_t);
/* Query life-cycle hooks: initial credential receive / dispatch, and teardown. */
57 static int on_query_startup(struct query_state *);
58 static void on_query_destroy(struct query_state *);
/* Read-request state machine: read1 -> read2 -> process -> write1 [-> write2]. */
60 static int on_read_request_read1(struct query_state *);
61 static int on_read_request_read2(struct query_state *);
62 static int on_read_request_process(struct query_state *);
63 static int on_read_response_write1(struct query_state *);
64 static int on_read_response_write2(struct query_state *);
/* Dispatches follow-up read/write requests on a persistent connection. */
66 static int on_rw_mapper(struct query_state *);
/* Transform-request state machine (cache clear/transform commands). */
68 static int on_transform_request_read1(struct query_state *);
69 static int on_transform_request_read2(struct query_state *);
70 static int on_transform_request_process(struct query_state *);
71 static int on_transform_response_write1(struct query_state *);
/* Write-request state machine; negative variant caches a not-found marker. */
73 static int on_write_request_read1(struct query_state *);
74 static int on_write_request_read2(struct query_state *);
75 static int on_negative_write_request_process(struct query_state *);
76 static int on_write_request_process(struct query_state *);
77 static int on_write_response_write1(struct query_state *);
80 * Clears the specified configuration entry (clears the cache for positive and
81 * and negative entries) and also for all multipart entries.
/*
 * Clears the whole cache for one configuration entry: the positive cache,
 * the negative cache, and every multipart cache entry, each transformed
 * under its corresponding entry lock (CELT_POSITIVE/NEGATIVE/MULTIPART).
 * NOTE(review): the return type line, braces, the CTT_* argument lines and
 * the declaration of `i` are missing from this extract -- restore upstream.
 */
84 clear_config_entry(struct configuration_entry *config_entry)
88 TRACE_IN(clear_config_entry);
/* Positive cache: transform (clear) while holding the positive-entry lock. */
89 configuration_lock_entry(config_entry, CELT_POSITIVE);
90 if (config_entry->positive_cache_entry != NULL)
91 transform_cache_entry(
92 config_entry->positive_cache_entry,
94 configuration_unlock_entry(config_entry, CELT_POSITIVE);
/* Negative cache: same pattern under the negative-entry lock. */
96 configuration_lock_entry(config_entry, CELT_NEGATIVE);
97 if (config_entry->negative_cache_entry != NULL)
98 transform_cache_entry(
99 config_entry->negative_cache_entry,
101 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
/* Multipart caches: transform each under the single multipart lock. */
103 configuration_lock_entry(config_entry, CELT_MULTIPART);
104 for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
105 transform_cache_entry(
106 config_entry->mp_cache_entries[i],
108 configuration_unlock_entry(config_entry, CELT_MULTIPART);
110 TRACE_OUT(clear_config_entry);
114 * Clears the specified configuration entry by deleting only the elements,
115 * that are owned by the user with specified eid_str.
/*
 * Partially clears one configuration entry: deletes only the cached elements
 * owned by the user identified by eid_str (keys are prefixed with the eid
 * string, hence KPPT_LEFT key-part matching). Multipart entries matching the
 * eid are cleared wholesale.
 */
118 clear_config_entry_part(struct configuration_entry *config_entry,
119 const char *eid_str, size_t eid_str_length)
121 cache_entry *start, *finish, *mp_entry;
122 TRACE_IN(clear_config_entry_part);
/* Positive cache: clear only keys whose left part matches eid_str. */
123 configuration_lock_entry(config_entry, CELT_POSITIVE);
124 if (config_entry->positive_cache_entry != NULL)
125 transform_cache_entry_part(
126 config_entry->positive_cache_entry,
127 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
128 configuration_unlock_entry(config_entry, CELT_POSITIVE);
/* Negative cache: same eid-prefixed partial clear. */
130 configuration_lock_entry(config_entry, CELT_NEGATIVE);
131 if (config_entry->negative_cache_entry != NULL)
132 transform_cache_entry_part(
133 config_entry->negative_cache_entry,
134 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
135 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
/* Multipart: find the [start, finish) range of entries for this eid and
 * clear each one completely. */
137 configuration_lock_entry(config_entry, CELT_MULTIPART);
138 if (configuration_entry_find_mp_cache_entries(config_entry,
139 eid_str, &start, &finish) == 0) {
140 for (mp_entry = start; mp_entry != finish; ++mp_entry)
141 transform_cache_entry(*mp_entry, CTT_CLEAR);
143 configuration_unlock_entry(config_entry, CELT_MULTIPART);
145 TRACE_OUT(clear_config_entry_part);
149 * This function is assigned to the query_state structue on its creation.
150 * It's main purpose is to receive credentials from the client.
/*
 * First process_func of every new query_state: receives the client's
 * credentials (SCM_CREDS control message) together with the request type
 * (an int in the iovec) in a single recvmsg(), validates them, and
 * dispatches to the per-request-type read1 handler.
 * NOTE(review): the declarations of elem_type, iov and the cmsg union/struct
 * wrapper, plus several braces and return statements, are missing from this
 * extract -- restore from upstream before use.
 */
153 on_query_startup(struct query_state *qstate)
155 struct msghdr cred_hdr;
157 struct cmsgcred *cred;
/* Control-message buffer sized for one cmsgcred payload. */
162 char cred[CMSG_SPACE(sizeof(struct cmsgcred))];
165 TRACE_IN(on_query_startup);
166 assert(qstate != NULL);
/* Build a recvmsg header: one int of payload + credential control data. */
168 memset(&cred_hdr, 0, sizeof(struct msghdr));
169 cred_hdr.msg_iov = &iov;
170 cred_hdr.msg_iovlen = 1;
171 cred_hdr.msg_control = (caddr_t)&cmsg;
172 cred_hdr.msg_controllen = CMSG_LEN(sizeof(struct cmsgcred));
174 memset(&iov, 0, sizeof(struct iovec));
175 iov.iov_base = &elem_type;
176 iov.iov_len = sizeof(int);
178 if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
179 TRACE_OUT(on_query_startup);
/* Reject messages without a well-formed SCM_CREDS control message. */
183 if (cmsg.hdr.cmsg_len < CMSG_LEN(sizeof(struct cmsgcred))
184 || cmsg.hdr.cmsg_level != SOL_SOCKET
185 || cmsg.hdr.cmsg_type != SCM_CREDS) {
186 TRACE_OUT(on_query_startup);
/* Record the kernel-verified real uid/gid of the client. */
190 cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
191 qstate->uid = cred->cmcred_uid;
192 qstate->gid = cred->cmcred_gid;
194 #if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
196 * This check is probably a bit redundant - per-user cache is always separated
197 * by the euid/egid pair
199 if (check_query_eids(qstate) != 0) {
200 #ifdef NS_STRICT_NSCD_EID_CHECKING
201 TRACE_OUT(on_query_startup);
/* Under non-strict checking only plain read/write and multipart
 * session requests are allowed when eids mismatch. */
204 if ((elem_type != CET_READ_REQUEST) &&
205 (elem_type != CET_MP_READ_SESSION_REQUEST) &&
206 (elem_type != CET_WRITE_REQUEST) &&
207 (elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
208 TRACE_OUT(on_query_startup);
/* Dispatch: choose the next state-machine handler by request type.
 * NOTE(review): the switch header and break statements are missing here. */
216 case CET_WRITE_REQUEST:
217 qstate->process_func = on_write_request_read1;
219 case CET_READ_REQUEST:
220 qstate->process_func = on_read_request_read1;
222 case CET_TRANSFORM_REQUEST:
223 qstate->process_func = on_transform_request_read1;
225 case CET_MP_WRITE_SESSION_REQUEST:
226 qstate->process_func = on_mp_write_session_request_read1;
228 case CET_MP_READ_SESSION_REQUEST:
229 qstate->process_func = on_mp_read_session_request_read1;
232 TRACE_OUT(on_query_startup);
/* Watermark 0: next handler decides how much data it needs. */
236 qstate->kevent_watermark = 0;
237 TRACE_OUT(on_query_startup);
242 * on_rw_mapper is used to process multiple read/write requests during
243 * one connection session. It's never called in the beginning (on query_state
244 * creation) as it does not process the multipart requests and does not
245 * receive credentials
/*
 * Handles subsequent requests on an already-established connection: reads
 * the next request-type int and dispatches to the read or write read1
 * handler. Unlike on_query_startup it does not receive credentials and
 * does not accept multipart or transform requests.
 * NOTE(review): declarations of elem_type/result, the switch header, break
 * statements and returns are missing from this extract.
 */
248 on_rw_mapper(struct query_state *qstate)
253 TRACE_IN(on_rw_mapper);
/* First call: just arm the watermark to wait for the type int. */
254 if (qstate->kevent_watermark == 0) {
255 qstate->kevent_watermark = sizeof(int);
257 result = qstate->read_func(qstate, &elem_type, sizeof(int));
258 if (result != sizeof(int)) {
259 TRACE_OUT(on_rw_mapper);
/* Only plain write/read requests are valid mid-session. */
264 case CET_WRITE_REQUEST:
265 qstate->kevent_watermark = sizeof(size_t);
266 qstate->process_func = on_write_request_read1;
268 case CET_READ_REQUEST:
269 qstate->kevent_watermark = sizeof(size_t);
270 qstate->process_func = on_read_request_read1;
273 TRACE_OUT(on_rw_mapper);
278 TRACE_OUT(on_rw_mapper);
283 * The default query_destroy function
/*
 * Default destroy_func for a query_state: releases the request and
 * response communication elements. Does not free qstate itself.
 */
286 on_query_destroy(struct query_state *qstate)
289 TRACE_IN(on_query_destroy);
290 finalize_comm_element(&qstate->response);
291 finalize_comm_element(&qstate->request);
292 TRACE_OUT(on_query_destroy);
296 * The functions below are used to process write requests.
297 * - on_write_request_read1 and on_write_request_read2 read the request itself
298 * - on_write_request_process processes it (if the client requests to
299 * cache the negative result, the on_negative_write_request_process is used)
300 * - on_write_response_write1 sends the response
/*
 * First stage of a write request: reads the three size_t header fields
 * (entry name length, cache key size, data size), validates them with
 * BUFSIZE_INVALID, allocates the receive buffers, and arms read2 to
 * receive the payload. data_size == 0 marks a negative (not-found) write.
 * The cache key buffer is prefixed with the client's eid string so that
 * per-user cache entries stay separated.
 */
303 on_write_request_read1(struct query_state *qstate)
305 struct cache_write_request *write_request;
308 TRACE_IN(on_write_request_read1);
/* First call: wait until all three header size_t values are available. */
309 if (qstate->kevent_watermark == 0)
310 qstate->kevent_watermark = sizeof(size_t) * 3;
312 init_comm_element(&qstate->request, CET_WRITE_REQUEST);
313 write_request = get_cache_write_request(&qstate->request);
315 result = qstate->read_func(qstate, &write_request->entry_length,
317 result += qstate->read_func(qstate,
318 &write_request->cache_key_size, sizeof(size_t));
319 result += qstate->read_func(qstate,
320 &write_request->data_size, sizeof(size_t));
322 if (result != sizeof(size_t) * 3) {
323 TRACE_OUT(on_write_request_read1);
/* Sanity-check client-supplied sizes; data_size 0 is allowed (negative
 * write), any other invalid size aborts the query. */
327 if (BUFSIZE_INVALID(write_request->entry_length) ||
328 BUFSIZE_INVALID(write_request->cache_key_size) ||
329 (BUFSIZE_INVALID(write_request->data_size) &&
330 (write_request->data_size != 0))) {
331 TRACE_OUT(on_write_request_read1);
/* +1 for the NUL terminator of the entry name. */
335 write_request->entry = (char *)calloc(1,
336 write_request->entry_length + 1);
337 assert(write_request->entry != NULL);
/* Key buffer holds eid prefix + client key; prefix copied in now, the
 * client part is filled in by read2 after the prefix. */
339 write_request->cache_key = (char *)calloc(1,
340 write_request->cache_key_size +
341 qstate->eid_str_length);
342 assert(write_request->cache_key != NULL);
343 memcpy(write_request->cache_key, qstate->eid_str,
344 qstate->eid_str_length);
346 if (write_request->data_size != 0) {
347 write_request->data = (char *)calloc(1,
348 write_request->data_size);
349 assert(write_request->data != NULL);
/* Wait for the full payload before entering read2. */
352 qstate->kevent_watermark = write_request->entry_length +
353 write_request->cache_key_size +
354 write_request->data_size;
355 qstate->process_func = on_write_request_read2;
358 TRACE_OUT(on_write_request_read1);
/*
 * Second stage of a write request: receives the entry name, the client's
 * part of the cache key (placed after the eid prefix) and, if present, the
 * data payload. Then routes to the positive or negative process handler
 * depending on whether data was supplied.
 */
363 on_write_request_read2(struct query_state *qstate)
365 struct cache_write_request *write_request;
368 TRACE_IN(on_write_request_read2);
369 write_request = get_cache_write_request(&qstate->request);
371 result = qstate->read_func(qstate, write_request->entry,
372 write_request->entry_length);
/* Client key lands after the eid prefix written by read1. */
373 result += qstate->read_func(qstate, write_request->cache_key +
374 qstate->eid_str_length, write_request->cache_key_size);
375 if (write_request->data_size != 0)
376 result += qstate->read_func(qstate, write_request->data,
377 write_request->data_size);
379 if (result != qstate->kevent_watermark) {
380 TRACE_OUT(on_write_request_read2);
/* From here on cache_key_size covers prefix + client key. */
383 write_request->cache_key_size += qstate->eid_str_length;
385 qstate->kevent_watermark = 0;
/* data_size == 0 means "cache a negative result". */
386 if (write_request->data_size != 0)
387 qstate->process_func = on_write_request_process;
389 qstate->process_func = on_negative_write_request_process;
390 TRACE_OUT(on_write_request_read2);
/*
 * Processes a positive write request: looks up the configuration entry by
 * name, rejects disabled or self-lookup entries, then writes the key/data
 * pair into the positive cache under the entry lock. The error code is
 * stored in the response and sent by on_write_response_write1.
 * Error codes: ENOENT (unknown entry), EACCES (disabled), EOPNOTSUPP
 * (entry performs its own lookups), -1 (cache entry not found).
 * NOTE(review): the declaration of c_entry, several braces and goto/return
 * lines are missing from this extract.
 */
395 on_write_request_process(struct query_state *qstate)
397 struct cache_write_request *write_request;
398 struct cache_write_response *write_response;
401 TRACE_IN(on_write_request_process);
402 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
403 write_response = get_cache_write_response(&qstate->response);
404 write_request = get_cache_write_request(&qstate->request);
406 qstate->config_entry = configuration_find_entry(
407 s_configuration, write_request->entry);
409 if (qstate->config_entry == NULL) {
410 write_response->error_code = ENOENT;
412 LOG_ERR_2("write_request", "can't find configuration"
413 " entry '%s'. aborting request", write_request->entry);
417 if (qstate->config_entry->enabled == 0) {
418 write_response->error_code = EACCES;
420 LOG_ERR_2("write_request",
421 "configuration entry '%s' is disabled",
422 write_request->entry);
/* Entries that do their own lookups are read-only from clients. */
426 if (qstate->config_entry->perform_actual_lookups != 0) {
427 write_response->error_code = EOPNOTSUPP;
429 LOG_ERR_2("write_request",
430 "entry '%s' performs lookups by itself: "
431 "can't write to it", write_request->entry);
/* Resolve the positive cache entry under the global read lock, then do
 * the actual cache_write under the per-entry positive lock. */
435 configuration_lock_rdlock(s_configuration);
436 c_entry = find_cache_entry(s_cache,
437 qstate->config_entry->positive_cache_params.entry_name);
438 configuration_unlock(s_configuration);
439 if (c_entry != NULL) {
440 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
441 qstate->config_entry->positive_cache_entry = c_entry;
442 write_response->error_code = cache_write(c_entry,
443 write_request->cache_key,
444 write_request->cache_key_size,
446 write_request->data_size);
447 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
/* Apply the per-entry query timeout, if one is configured. */
449 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
450 (qstate->config_entry->common_query_timeout.tv_usec != 0))
451 memcpy(&qstate->timeout,
452 &qstate->config_entry->common_query_timeout,
453 sizeof(struct timeval));
456 write_response->error_code = -1;
/* Switch to write mode to send the int error code back. */
459 qstate->kevent_filter = EVFILT_WRITE;
460 qstate->kevent_watermark = sizeof(int);
461 qstate->process_func = on_write_response_write1;
463 TRACE_OUT(on_write_request_process);
/*
 * Processes a negative write request (data_size == 0): caches the one-byte
 * negative_data marker under the key in the NEGATIVE cache. Mirrors
 * on_write_request_process but additionally, when NS_NSCD_EID_CHECKING is
 * defined, rejects clients whose euid/egid differ from uid/gid with EPERM.
 * NOTE(review): declaration of c_entry, several braces and goto/return
 * lines are missing from this extract.
 */
468 on_negative_write_request_process(struct query_state *qstate)
470 struct cache_write_request *write_request;
471 struct cache_write_response *write_response;
474 TRACE_IN(on_negative_write_request_process);
475 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
476 write_response = get_cache_write_response(&qstate->response);
477 write_request = get_cache_write_request(&qstate->request);
479 qstate->config_entry = configuration_find_entry (
480 s_configuration, write_request->entry);
482 if (qstate->config_entry == NULL) {
483 write_response->error_code = ENOENT;
485 LOG_ERR_2("negative_write_request",
486 "can't find configuration"
487 " entry '%s'. aborting request", write_request->entry);
491 if (qstate->config_entry->enabled == 0) {
492 write_response->error_code = EACCES;
494 LOG_ERR_2("negative_write_request",
495 "configuration entry '%s' is disabled",
496 write_request->entry);
500 if (qstate->config_entry->perform_actual_lookups != 0) {
501 write_response->error_code = EOPNOTSUPP;
503 LOG_ERR_2("negative_write_request",
504 "entry '%s' performs lookups by itself: "
505 "can't write to it", write_request->entry);
/* Optional euid/egid consistency check (compile-time option). */
508 #ifdef NS_NSCD_EID_CHECKING
509 if (check_query_eids(qstate) != 0) {
510 write_response->error_code = EPERM;
/* Write the negative_data marker into the NEGATIVE cache under the
 * per-entry negative lock. */
516 configuration_lock_rdlock(s_configuration);
517 c_entry = find_cache_entry(s_cache,
518 qstate->config_entry->negative_cache_params.entry_name);
519 configuration_unlock(s_configuration);
520 if (c_entry != NULL) {
521 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
522 qstate->config_entry->negative_cache_entry = c_entry;
523 write_response->error_code = cache_write(c_entry,
524 write_request->cache_key,
525 write_request->cache_key_size,
527 sizeof(negative_data));
528 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
530 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
531 (qstate->config_entry->common_query_timeout.tv_usec != 0))
532 memcpy(&qstate->timeout,
533 &qstate->config_entry->common_query_timeout,
534 sizeof(struct timeval));
536 write_response->error_code = -1;
/* Switch to write mode to send the int error code back. */
539 qstate->kevent_filter = EVFILT_WRITE;
540 qstate->kevent_watermark = sizeof(int);
541 qstate->process_func = on_write_response_write1;
543 TRACE_OUT(on_negative_write_request_process);
/*
 * Sends the write response (a single int error code) to the client, then
 * releases both comm elements and re-arms the connection for the next
 * request via on_rw_mapper (back to EVFILT_READ).
 */
548 on_write_response_write1(struct query_state *qstate)
550 struct cache_write_response *write_response;
553 TRACE_IN(on_write_response_write1);
554 write_response = get_cache_write_response(&qstate->response);
555 result = qstate->write_func(qstate, &write_response->error_code,
557 if (result != sizeof(int)) {
558 TRACE_OUT(on_write_response_write1);
/* Request done: recycle the query_state for the next request. */
562 finalize_comm_element(&qstate->request);
563 finalize_comm_element(&qstate->response);
565 qstate->kevent_watermark = sizeof(int);
566 qstate->kevent_filter = EVFILT_READ;
567 qstate->process_func = on_rw_mapper;
569 TRACE_OUT(on_write_response_write1);
574 * The functions below are used to process read requests.
575 * - on_read_request_read1 and on_read_request_read2 read the request itself
576 * - on_read_request_process processes it
577 * - on_read_response_write1 and on_read_response_write2 send the response
/*
 * First stage of a read request: reads the entry name length and cache key
 * size, validates them, allocates buffers (the key buffer again prefixed
 * with the client's eid string), and arms read2 for the payload.
 */
580 on_read_request_read1(struct query_state *qstate)
582 struct cache_read_request *read_request;
585 TRACE_IN(on_read_request_read1);
/* First call: wait for both header size_t values. */
586 if (qstate->kevent_watermark == 0)
587 qstate->kevent_watermark = sizeof(size_t) * 2;
589 init_comm_element(&qstate->request, CET_READ_REQUEST);
590 read_request = get_cache_read_request(&qstate->request);
592 result = qstate->read_func(qstate,
593 &read_request->entry_length, sizeof(size_t));
594 result += qstate->read_func(qstate,
595 &read_request->cache_key_size, sizeof(size_t));
597 if (result != sizeof(size_t) * 2) {
598 TRACE_OUT(on_read_request_read1);
/* Reject bogus client-supplied sizes. */
602 if (BUFSIZE_INVALID(read_request->entry_length) ||
603 BUFSIZE_INVALID(read_request->cache_key_size)) {
604 TRACE_OUT(on_read_request_read1);
/* +1 for the NUL terminator of the entry name. */
608 read_request->entry = (char *)calloc(1,
609 read_request->entry_length + 1);
610 assert(read_request->entry != NULL);
/* eid prefix first; the client key part is appended by read2. */
612 read_request->cache_key = (char *)calloc(1,
613 read_request->cache_key_size +
614 qstate->eid_str_length);
615 assert(read_request->cache_key != NULL);
616 memcpy(read_request->cache_key, qstate->eid_str,
617 qstate->eid_str_length);
619 qstate->kevent_watermark = read_request->entry_length +
620 read_request->cache_key_size;
621 qstate->process_func = on_read_request_read2;
624 TRACE_OUT(on_read_request_read1);
/*
 * Second stage of a read request: receives the entry name and the client's
 * key (after the eid prefix), then hands off to on_read_request_process.
 */
629 on_read_request_read2(struct query_state *qstate)
631 struct cache_read_request *read_request;
634 TRACE_IN(on_read_request_read2);
635 read_request = get_cache_read_request(&qstate->request);
637 result = qstate->read_func(qstate, read_request->entry,
638 read_request->entry_length);
639 result += qstate->read_func(qstate,
640 read_request->cache_key + qstate->eid_str_length,
641 read_request->cache_key_size);
643 if (result != qstate->kevent_watermark) {
644 TRACE_OUT(on_read_request_read2);
/* Key size now includes the eid prefix. */
647 read_request->cache_key_size += qstate->eid_str_length;
649 qstate->kevent_watermark = 0;
650 qstate->process_func = on_read_request_process;
652 TRACE_OUT(on_read_request_read2);
/*
 * Processes a read request: looks up the configuration entry, then tries
 * the positive cache (two-phase cache_read: first call with NULL data to
 * learn the size, signalled by -2; then the real read), falls back to the
 * negative cache, and finally -- if the entry performs actual lookups --
 * runs the lookup agent itself and writes the result (or a negative marker)
 * back into the cache. Response is sent by on_read_response_write1/2.
 * NOTE(review): declarations of res/size, braces, goto/return lines, the
 * CELT_* argument lines and parts of the cache_write calls are missing from
 * this extract.
 */
657 on_read_request_process(struct query_state *qstate)
659 struct cache_read_request *read_request;
660 struct cache_read_response *read_response;
661 cache_entry c_entry, neg_c_entry;
663 struct agent *lookup_agent;
664 struct common_agent *c_agent;
667 TRACE_IN(on_read_request_process);
668 init_comm_element(&qstate->response, CET_READ_RESPONSE);
669 read_response = get_cache_read_response(&qstate->response);
670 read_request = get_cache_read_request(&qstate->request);
672 qstate->config_entry = configuration_find_entry(
673 s_configuration, read_request->entry);
674 if (qstate->config_entry == NULL) {
675 read_response->error_code = ENOENT;
677 LOG_ERR_2("read_request",
678 "can't find configuration "
679 "entry '%s'. aborting request", read_request->entry);
683 if (qstate->config_entry->enabled == 0) {
684 read_response->error_code = EACCES;
686 LOG_ERR_2("read_request",
687 "configuration entry '%s' is disabled",
688 read_request->entry);
693 * if we perform lookups by ourselves, then we don't need to separate
694 * cache entries by euid and egid
696 if (qstate->config_entry->perform_actual_lookups != 0)
697 memset(read_request->cache_key, 0, qstate->eid_str_length);
699 #ifdef NS_NSCD_EID_CHECKING
700 if (check_query_eids(qstate) != 0) {
701 /* if the lookup is not self-performing, we check for clients euid/egid */
702 read_response->error_code = EPERM;
/* Resolve both cache entries under the global read lock. */
708 configuration_lock_rdlock(s_configuration);
709 c_entry = find_cache_entry(s_cache,
710 qstate->config_entry->positive_cache_params.entry_name);
711 neg_c_entry = find_cache_entry(s_cache,
712 qstate->config_entry->negative_cache_params.entry_name);
713 configuration_unlock(s_configuration);
714 if ((c_entry != NULL) && (neg_c_entry != NULL)) {
715 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
716 qstate->config_entry->positive_cache_entry = c_entry;
/* Probe read with NULL buffer: -2 means "found, data_size set". */
717 read_response->error_code = cache_read(c_entry,
718 read_request->cache_key,
719 read_request->cache_key_size, NULL,
720 &read_response->data_size);
722 if (read_response->error_code == -2) {
723 read_response->data = (char *)malloc(
724 read_response->data_size);
/* NOTE(review): this assert checks the wrong pointer -- it
 * should presumably be read_response->data after the malloc
 * above; confirm against upstream before fixing. */
725 assert(read_response != NULL);
726 read_response->error_code = cache_read(c_entry,
727 read_request->cache_key,
728 read_request->cache_key_size,
730 &read_response->data_size);
732 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
/* Positive miss (-1): consult the negative cache; a hit (-2)
 * becomes a successful "no data" response. */
734 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
735 qstate->config_entry->negative_cache_entry = neg_c_entry;
736 if (read_response->error_code == -1) {
737 read_response->error_code = cache_read(neg_c_entry,
738 read_request->cache_key,
739 read_request->cache_key_size, NULL,
740 &read_response->data_size);
742 if (read_response->error_code == -2) {
743 read_response->error_code = 0;
744 read_response->data = NULL;
745 read_response->data_size = 0;
748 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
/* Still a miss and the entry does its own lookups: run the
 * common agent and populate the appropriate cache. */
750 if ((read_response->error_code == -1) &&
751 (qstate->config_entry->perform_actual_lookups != 0)) {
752 free(read_response->data);
753 read_response->data = NULL;
754 read_response->data_size = 0;
756 lookup_agent = find_agent(s_agent_table,
757 read_request->entry, COMMON_AGENT);
759 if ((lookup_agent != NULL) &&
760 (lookup_agent->type == COMMON_AGENT)) {
761 c_agent = (struct common_agent *)lookup_agent;
/* Agent sees the key without the (zeroed) eid prefix. */
762 res = c_agent->lookup_func(
763 read_request->cache_key +
764 qstate->eid_str_length,
765 read_request->cache_key_size -
766 qstate->eid_str_length,
767 &read_response->data,
768 &read_response->data_size);
770 if (res == NS_SUCCESS) {
771 read_response->error_code = 0;
772 configuration_lock_entry(
773 qstate->config_entry,
776 read_request->cache_key,
777 read_request->cache_key_size,
779 read_response->data_size);
780 configuration_unlock_entry(
781 qstate->config_entry,
783 } else if ((res == NS_NOTFOUND) ||
784 (res == NS_RETURN)) {
785 configuration_lock_entry(
786 qstate->config_entry,
788 cache_write(neg_c_entry,
789 read_request->cache_key,
790 read_request->cache_key_size,
792 sizeof(negative_data));
793 configuration_unlock_entry(
794 qstate->config_entry,
/* Not-found is reported as success with empty data. */
797 read_response->error_code = 0;
798 read_response->data = NULL;
799 read_response->data_size = 0;
804 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
805 (qstate->config_entry->common_query_timeout.tv_usec != 0))
806 memcpy(&qstate->timeout,
807 &qstate->config_entry->common_query_timeout,
808 sizeof(struct timeval));
810 read_response->error_code = -1;
/* Success responses carry error code + data size; errors just the code. */
813 qstate->kevent_filter = EVFILT_WRITE;
814 if (read_response->error_code == 0)
815 qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
817 qstate->kevent_watermark = sizeof(int);
818 qstate->process_func = on_read_response_write1;
820 TRACE_OUT(on_read_request_process);
/*
 * Sends the read response header: the int error code, and on success also
 * the data size. On success, chains to write2 to send the data itself;
 * on error the query terminates (process_func = NULL).
 */
825 on_read_response_write1(struct query_state *qstate)
827 struct cache_read_response *read_response;
830 TRACE_IN(on_read_response_write1);
831 read_response = get_cache_read_response(&qstate->response);
833 result = qstate->write_func(qstate, &read_response->error_code,
/* Success: also send data_size and arm write2 for the payload. */
836 if (read_response->error_code == 0) {
837 result += qstate->write_func(qstate, &read_response->data_size,
839 if (result != qstate->kevent_watermark) {
840 TRACE_OUT(on_read_response_write1);
844 qstate->kevent_watermark = read_response->data_size;
845 qstate->process_func = on_read_response_write2;
847 if (result != qstate->kevent_watermark) {
848 TRACE_OUT(on_read_response_write1);
/* Error path: nothing more to send, end the query. */
852 qstate->kevent_watermark = 0;
853 qstate->process_func = NULL;
856 TRACE_OUT(on_read_response_write1);
/*
 * Sends the read response payload (if any), then releases both comm
 * elements and re-arms the connection for the next request via
 * on_rw_mapper (back to EVFILT_READ).
 */
861 on_read_response_write2(struct query_state *qstate)
863 struct cache_read_response *read_response;
866 TRACE_IN(on_read_response_write2);
867 read_response = get_cache_read_response(&qstate->response);
868 if (read_response->data_size > 0) {
869 result = qstate->write_func(qstate, read_response->data,
870 read_response->data_size);
871 if (result != qstate->kevent_watermark) {
872 TRACE_OUT(on_read_response_write2);
/* Request done: recycle the query_state for the next request. */
877 finalize_comm_element(&qstate->request);
878 finalize_comm_element(&qstate->response);
880 qstate->kevent_watermark = sizeof(int);
881 qstate->kevent_filter = EVFILT_READ;
882 qstate->process_func = on_rw_mapper;
883 TRACE_OUT(on_read_response_write2);
888 * The functions below are used to process write requests.
889 * - on_transform_request_read1 and on_transform_request_read2 read the
891 * - on_transform_request_process processes it
892 * - on_transform_response_write1 sends the response
/*
 * First stage of a transform (cache-clear) request: reads the entry name
 * length and the transformation type (TT_USER or TT_ALL). An entry length
 * of 0 means "all entries" and skips straight to process; otherwise the
 * name buffer is allocated and read2 is armed.
 */
895 on_transform_request_read1(struct query_state *qstate)
897 struct cache_transform_request *transform_request;
900 TRACE_IN(on_transform_request_read1);
901 if (qstate->kevent_watermark == 0)
902 qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
904 init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
/* NOTE(review): the return value is not assigned here -- the original
 * presumably had "transform_request =" on a preceding (missing) line;
 * as shown, transform_request is used uninitialized below. Confirm
 * against upstream. */
906 get_cache_transform_request(&qstate->request);
908 result = qstate->read_func(qstate,
909 &transform_request->entry_length, sizeof(size_t));
910 result += qstate->read_func(qstate,
911 &transform_request->transformation_type, sizeof(int));
913 if (result != sizeof(size_t) + sizeof(int)) {
914 TRACE_OUT(on_transform_request_read1);
/* Only the two known transformation types are accepted. */
918 if ((transform_request->transformation_type != TT_USER) &&
919 (transform_request->transformation_type != TT_ALL)) {
920 TRACE_OUT(on_transform_request_read1);
924 if (transform_request->entry_length != 0) {
925 if (BUFSIZE_INVALID(transform_request->entry_length)) {
926 TRACE_OUT(on_transform_request_read1);
/* +1 for the NUL terminator of the entry name. */
930 transform_request->entry = (char *)calloc(1,
931 transform_request->entry_length + 1);
932 assert(transform_request->entry != NULL);
934 qstate->process_func = on_transform_request_read2;
/* Zero-length name: no entry to read, process all entries. */
936 qstate->process_func = on_transform_request_process;
938 qstate->kevent_watermark = transform_request->entry_length;
941 TRACE_OUT(on_transform_request_read1);
/*
 * Second stage of a transform request: receives the entry name and hands
 * off to on_transform_request_process.
 */
946 on_transform_request_read2(struct query_state *qstate)
948 struct cache_transform_request *transform_request;
951 TRACE_IN(on_transform_request_read2);
952 transform_request = get_cache_transform_request(&qstate->request);
954 result = qstate->read_func(qstate, transform_request->entry,
955 transform_request->entry_length);
957 if (result != qstate->kevent_watermark) {
958 TRACE_OUT(on_transform_request_read2);
962 qstate->kevent_watermark = 0;
963 qstate->process_func = on_transform_request_process;
965 TRACE_OUT(on_transform_request_read2);
/*
 * Processes a transform request. TT_USER clears only the caller's (eid-
 * prefixed) entries -- for all configuration entries when no name was
 * given, or for the single named entry. TT_ALL (root only, judging by the
 * euid != 0 rejection) fully clears the named entry or all entries.
 * NOTE(review): the switch header, case labels for TT_USER/TT_ALL,
 * declarations of i/size, braces and break statements are missing from
 * this extract; the "it ised" typo at original line 1010 lives inside a
 * runtime log string and should be fixed upstream ("is used").
 */
970 on_transform_request_process(struct query_state *qstate)
972 struct cache_transform_request *transform_request;
973 struct cache_transform_response *transform_response;
974 struct configuration_entry *config_entry;
977 TRACE_IN(on_transform_request_process);
978 init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
979 transform_response = get_cache_transform_response(&qstate->response);
980 transform_request = get_cache_transform_request(&qstate->request);
982 switch (transform_request->transformation_type) {
/* TT_USER branch: per-user partial clear. */
984 if (transform_request->entry == NULL) {
985 size = configuration_get_entries_size(s_configuration);
986 for (i = 0; i < size; ++i) {
987 config_entry = configuration_get_entry(
/* Self-lookup entries are not user-separated; skip them. */
990 if (config_entry->perform_actual_lookups == 0)
991 clear_config_entry_part(config_entry,
992 qstate->eid_str, qstate->eid_str_length);
995 qstate->config_entry = configuration_find_entry(
996 s_configuration, transform_request->entry);
998 if (qstate->config_entry == NULL) {
999 LOG_ERR_2("transform_request",
1000 "can't find configuration"
1001 " entry '%s'. aborting request",
1002 transform_request->entry);
1003 transform_response->error_code = -1;
1007 if (qstate->config_entry->perform_actual_lookups != 0) {
1008 LOG_ERR_2("transform_request",
1009 "can't transform the cache entry %s"
1010 ", because it ised for actual lookups",
1011 transform_request->entry);
1012 transform_response->error_code = -1;
1016 clear_config_entry_part(qstate->config_entry,
1017 qstate->eid_str, qstate->eid_str_length);
/* TT_ALL branch: full clear, refused for non-root clients. */
1021 if (qstate->euid != 0)
1022 transform_response->error_code = -1;
1024 if (transform_request->entry == NULL) {
1025 size = configuration_get_entries_size(
1027 for (i = 0; i < size; ++i) {
1029 configuration_get_entry(
1030 s_configuration, i));
1033 qstate->config_entry = configuration_find_entry(
1035 transform_request->entry);
1037 if (qstate->config_entry == NULL) {
1038 LOG_ERR_2("transform_request",
1039 "can't find configuration"
1040 " entry '%s'. aborting request",
1041 transform_request->entry);
1042 transform_response->error_code = -1;
1046 clear_config_entry(qstate->config_entry);
/* Unknown type: should be unreachable after read1 validation. */
1051 transform_response->error_code = -1;
1055 qstate->kevent_watermark = 0;
1056 qstate->process_func = on_transform_response_write1;
1057 TRACE_OUT(on_transform_request_process);
/*
 * Sends the transform response (a single int error code), then releases
 * both comm elements and ends the query (process_func = NULL) -- transform
 * connections are not reused.
 */
1062 on_transform_response_write1(struct query_state *qstate)
1064 struct cache_transform_response *transform_response;
1067 TRACE_IN(on_transform_response_write1);
1068 transform_response = get_cache_transform_response(&qstate->response);
1069 result = qstate->write_func(qstate, &transform_response->error_code,
1071 if (result != sizeof(int)) {
1072 TRACE_OUT(on_transform_response_write1);
1076 finalize_comm_element(&qstate->request);
1077 finalize_comm_element(&qstate->response);
1079 qstate->kevent_watermark = 0;
1080 qstate->process_func = NULL;
1081 TRACE_OUT(on_transform_response_write1);
1086 * Checks if the client's euid and egid do not differ from its uid and gid.
1087 * Returns 0 on success.
/*
 * Returns 0 when the client's real uid/gid (from SCM_CREDS) match the
 * effective euid/egid recorded at connection time, -1 otherwise.
 */
1090 check_query_eids(struct query_state *qstate)
1093 return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1097 * Uses the qstate fields to process an "alternate" read - when the buffer is
1098 * too large to be received during one socket read operation
/*
 * "Alternate" read_func: serves reads from the pre-filled qstate io_buffer
 * instead of the socket, for requests too large for one socket read. Caps
 * the copy at the bytes remaining in the buffer; once the buffer is fully
 * consumed it is freed and the socket read/write functions are restored.
 * NOTE(review): declaration of result and the early-return branches are
 * missing from this extract.
 */
1101 query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1105 TRACE_IN(query_io_buffer_read);
1106 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
/* Clamp result to min(nbytes, bytes remaining in the buffer). */
1109 if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1110 qstate->io_buffer_p)
1113 result = qstate->io_buffer + qstate->io_buffer_size -
1114 qstate->io_buffer_p;
1116 memcpy(buf, qstate->io_buffer_p, result);
1117 qstate->io_buffer_p += result;
/* Buffer exhausted: release it and fall back to direct socket I/O. */
1119 if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1120 free(qstate->io_buffer);
1121 qstate->io_buffer = NULL;
1123 qstate->write_func = query_socket_write;
1124 qstate->read_func = query_socket_read;
1127 TRACE_OUT(query_io_buffer_read);
1132 * Uses the qstate fields to process an "alternate" write - when the buffer is
1133 * too large to be sent during one socket write operation
/*
 * "Alternate" write_func: accumulates writes into the qstate io_buffer for
 * responses too large for one socket write. When the buffer fills up,
 * use_alternate_io is set, the buffer pointer rewinds to the start, and
 * the socket read/write functions are restored (the buffered data is
 * presumably flushed elsewhere -- confirm against upstream).
 */
1136 query_io_buffer_write(struct query_state *qstate, const void *buf,
1141 TRACE_IN(query_io_buffer_write);
1142 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
/* Clamp result to min(nbytes, space remaining in the buffer). */
1145 if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
1146 qstate->io_buffer_p)
1149 result = qstate->io_buffer + qstate->io_buffer_size -
1150 qstate->io_buffer_p;
1152 memcpy(qstate->io_buffer_p, buf, result);
1153 qstate->io_buffer_p += result;
/* Buffer full: switch to alternate-I/O mode and rewind for draining. */
1155 if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
1156 qstate->use_alternate_io = 1;
1157 qstate->io_buffer_p = qstate->io_buffer;
1159 qstate->write_func = query_socket_write;
1160 qstate->read_func = query_socket_read;
1163 TRACE_OUT(query_io_buffer_write);
1168 * The default "read" function, which reads data directly from socket
/*
 * Default read_func: reads directly from the query socket. A short read or
 * error latches socket_failed so every later call bails out immediately.
 * NOTE(review): `result < nbytes` compares a (presumably signed ssize_t)
 * result against an unsigned size_t -- the result == -1 case is already
 * handled, but the mixed-sign comparison deserves a cast upstream.
 */
1171 query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1175 TRACE_IN(query_socket_read);
1176 if (qstate->socket_failed != 0) {
1177 TRACE_OUT(query_socket_read);
1181 result = read(qstate->sockfd, buf, nbytes);
1182 if ((result == -1) || (result < nbytes))
1183 qstate->socket_failed = 1;
1185 TRACE_OUT(query_socket_read);
1190 * The default "write" function, which writes data directly to socket
/*
 * Default write_func: writes directly to the query socket. Mirrors
 * query_socket_read: a short write or error latches socket_failed.
 * NOTE(review): same signed/unsigned comparison caveat as in
 * query_socket_read.
 */
1193 query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1197 TRACE_IN(query_socket_write);
1198 if (qstate->socket_failed != 0) {
1199 TRACE_OUT(query_socket_write);
1203 result = write(qstate->sockfd, buf, nbytes);
1204 if ((result == -1) || (result < nbytes))
1205 qstate->socket_failed = 1;
1207 TRACE_OUT(query_socket_write);
1212 * Initializes the query_state structure by filling it with the default values.
/*
 * Allocates and initializes a query_state for a freshly accepted
 * connection: records the socket, kevent parameters and effective ids,
 * builds the "euid_egid_" eid prefix string used to separate per-user
 * cache keys, and installs the default process/destroy/read/write hooks.
 * The timeout starts from the global configuration default; uid/gid stay
 * -1 until on_query_startup receives real credentials.
 * Ownership: caller must release the result with destroy_query_state().
 */
1214 struct query_state *
1215 init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1217 struct query_state *retval;
1219 TRACE_IN(init_query_state);
1220 retval = (struct query_state *)calloc(1, sizeof(struct query_state));
1221 assert(retval != NULL);
1223 retval->sockfd = sockfd;
1224 retval->kevent_filter = EVFILT_READ;
1225 retval->kevent_watermark = kevent_watermark;
1227 retval->euid = euid;
1228 retval->egid = egid;
/* Real ids unknown until credentials arrive in on_query_startup. */
1229 retval->uid = retval->gid = -1;
/* Per-user cache key prefix: "<euid>_<egid>_". */
1231 if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1232 retval->egid) == -1) {
1236 retval->eid_str_length = strlen(retval->eid_str);
1238 init_comm_element(&retval->request, CET_UNDEFINED);
1239 init_comm_element(&retval->response, CET_UNDEFINED);
1240 retval->process_func = on_query_startup;
1241 retval->destroy_func = on_query_destroy;
1243 retval->write_func = query_socket_write;
1244 retval->read_func = query_socket_read;
1246 get_time_func(&retval->creation_time);
1247 memcpy(&retval->timeout, &s_configuration->query_timeout,
1248 sizeof(struct timeval));
1250 TRACE_OUT(init_query_state);
/*
 * Releases a query_state: frees the eid string and any leftover alternate
 * I/O buffer, then invokes the state's destroy hook (which also frees the
 * comm elements and, presumably, qstate itself -- confirm upstream).
 * Note: the NULL guards before free() are redundant (free(NULL) is a
 * no-op) but harmless.
 */
1255 destroy_query_state(struct query_state *qstate)
1258 TRACE_IN(destroy_query_state);
1259 if (qstate->eid_str != NULL)
1260 free(qstate->eid_str);
1262 if (qstate->io_buffer != NULL)
1263 free(qstate->io_buffer);
1265 qstate->destroy_func(qstate);
1267 TRACE_OUT(destroy_query_state);