2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/types.h>
32 #include <sys/event.h>
33 #include <sys/socket.h>
48 #include "mp_ws_query.h"
49 #include "mp_rs_query.h"
50 #include "singletons.h"
52 static const char negative_data[1] = { 0 };
54 extern void get_time_func(struct timeval *);
56 static void clear_config_entry(struct configuration_entry *);
57 static void clear_config_entry_part(struct configuration_entry *,
58 const char *, size_t);
60 static int on_query_startup(struct query_state *);
61 static void on_query_destroy(struct query_state *);
63 static int on_read_request_read1(struct query_state *);
64 static int on_read_request_read2(struct query_state *);
65 static int on_read_request_process(struct query_state *);
66 static int on_read_response_write1(struct query_state *);
67 static int on_read_response_write2(struct query_state *);
69 static int on_rw_mapper(struct query_state *);
71 static int on_transform_request_read1(struct query_state *);
72 static int on_transform_request_read2(struct query_state *);
73 static int on_transform_request_process(struct query_state *);
74 static int on_transform_response_write1(struct query_state *);
76 static int on_write_request_read1(struct query_state *);
77 static int on_write_request_read2(struct query_state *);
78 static int on_negative_write_request_process(struct query_state *);
79 static int on_write_request_process(struct query_state *);
80 static int on_write_response_write1(struct query_state *);
83 * Clears the specified configuration entry (clears the cache for positive and
84 * negative entries) and also for all multipart entries.
87 clear_config_entry(struct configuration_entry *config_entry)
/*
 * NOTE(review): this excerpt is missing lines; the transform_cache_entry()
 * calls below appear truncated (presumably a CTT_CLEAR argument follows) --
 * confirm against the full source.
 */
91 TRACE_IN(clear_config_entry);
/* Wipe the positive cache entry, if one is attached. */
92 configuration_lock_entry(config_entry, CELT_POSITIVE);
93 if (config_entry->positive_cache_entry != NULL)
94 transform_cache_entry(
95 config_entry->positive_cache_entry,
97 configuration_unlock_entry(config_entry, CELT_POSITIVE);
/* Wipe the negative cache entry, if one is attached. */
99 configuration_lock_entry(config_entry, CELT_NEGATIVE);
100 if (config_entry->negative_cache_entry != NULL)
101 transform_cache_entry(
102 config_entry->negative_cache_entry,
104 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
/* Wipe every multipart cache entry; each lock is held only for its section. */
106 configuration_lock_entry(config_entry, CELT_MULTIPART);
107 for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
108 transform_cache_entry(
109 config_entry->mp_cache_entries[i],
111 configuration_unlock_entry(config_entry, CELT_MULTIPART);
113 TRACE_OUT(clear_config_entry);
117 * Clears the specified configuration entry by deleting only the elements,
118 * that are owned by the user with specified eid_str.
121 clear_config_entry_part(struct configuration_entry *config_entry,
122 const char *eid_str, size_t eid_str_length)
124 cache_entry *start, *finish, *mp_entry;
125 TRACE_IN(clear_config_entry_part);
/*
 * Positive and negative caches store keys prefixed with the eid string,
 * so a partial clear matches on the left part of the key (KPPT_LEFT).
 */
126 configuration_lock_entry(config_entry, CELT_POSITIVE);
127 if (config_entry->positive_cache_entry != NULL)
128 transform_cache_entry_part(
129 config_entry->positive_cache_entry,
130 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
131 configuration_unlock_entry(config_entry, CELT_POSITIVE);
133 configuration_lock_entry(config_entry, CELT_NEGATIVE);
134 if (config_entry->negative_cache_entry != NULL)
135 transform_cache_entry_part(
136 config_entry->negative_cache_entry,
137 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
138 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
/*
 * Multipart entries are per-user, so the [start, finish) range returned
 * for this eid is cleared entirely (no key-part matching needed).
 */
140 configuration_lock_entry(config_entry, CELT_MULTIPART);
141 if (configuration_entry_find_mp_cache_entries(config_entry,
142 eid_str, &start, &finish) == 0) {
143 for (mp_entry = start; mp_entry != finish; ++mp_entry)
144 transform_cache_entry(*mp_entry, CTT_CLEAR);
146 configuration_unlock_entry(config_entry, CELT_MULTIPART);
148 TRACE_OUT(clear_config_entry_part);
152 * This function is assigned to the query_state structure on its creation.
153 * Its main purpose is to receive credentials from the client.
156 on_query_startup(struct query_state *qstate)
/*
 * Receives the request-type integer together with an SCM_CREDS control
 * message over the client socket, records the client's uid/gid, and
 * dispatches to the per-request-type read handler.
 * NOTE(review): this excerpt is gappy -- the msghdr/iovec/cmsg union
 * declarations, error returns and switch breaks are not visible here.
 */
160 char pad[CMSG_SPACE(sizeof(struct cmsgcred))];
164 struct cmsgcred *cred;
167 TRACE_IN(on_query_startup);
168 assert(qstate != NULL);
170 memset(&mhdr, 0, sizeof(mhdr));
173 mhdr.msg_control = &cmsg;
174 mhdr.msg_controllen = sizeof(cmsg);
/* The only payload read here is the element (request) type integer. */
176 memset(&iov, 0, sizeof(iov));
177 iov.iov_base = &elem_type;
178 iov.iov_len = sizeof(elem_type);
180 if (recvmsg(qstate->sockfd, &mhdr, 0) == -1) {
181 TRACE_OUT(on_query_startup);
/* Reject the query unless a well-formed SCM_CREDS message accompanied it. */
185 if (mhdr.msg_controllen != CMSG_SPACE(sizeof(struct cmsgcred)) ||
186 cmsg.hdr.cmsg_len != CMSG_LEN(sizeof(struct cmsgcred)) ||
187 cmsg.hdr.cmsg_level != SOL_SOCKET ||
188 cmsg.hdr.cmsg_type != SCM_CREDS) {
189 TRACE_OUT(on_query_startup);
193 cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
194 qstate->uid = cred->cmcred_uid;
195 qstate->gid = cred->cmcred_gid;
197 #if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
199 * This check is probably a bit redundant - per-user cache is always separated
200 * by the euid/egid pair
202 if (check_query_eids(qstate) != 0) {
203 #ifdef NS_STRICT_NSCD_EID_CHECKING
204 TRACE_OUT(on_query_startup);
/*
 * Under non-strict EID checking only the basic request types are allowed
 * when the client's euid/egid differ from its uid/gid.
 */
207 if ((elem_type != CET_READ_REQUEST) &&
208 (elem_type != CET_MP_READ_SESSION_REQUEST) &&
209 (elem_type != CET_WRITE_REQUEST) &&
210 (elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
211 TRACE_OUT(on_query_startup);
/* Dispatch: each request type gets its own first-stage read handler. */
219 case CET_WRITE_REQUEST:
220 qstate->process_func = on_write_request_read1;
222 case CET_READ_REQUEST:
223 qstate->process_func = on_read_request_read1;
225 case CET_TRANSFORM_REQUEST:
226 qstate->process_func = on_transform_request_read1;
228 case CET_MP_WRITE_SESSION_REQUEST:
229 qstate->process_func = on_mp_write_session_request_read1;
231 case CET_MP_READ_SESSION_REQUEST:
232 qstate->process_func = on_mp_read_session_request_read1;
235 TRACE_OUT(on_query_startup);
/* Watermark 0: the next handler computes its own read size. */
239 qstate->kevent_watermark = 0;
240 TRACE_OUT(on_query_startup);
245 * on_rw_mapper is used to process multiple read/write requests during
246 * one connection session. It's never called in the beginning (on query_state
247 * creation) as it does not process the multipart requests and does not
248 * receive credentials
251 on_rw_mapper(struct query_state *qstate)
256 TRACE_IN(on_rw_mapper);
/*
 * First pass (watermark 0): just arm the watermark so the kqueue event
 * fires once a full int (the next request type) is readable.
 */
257 if (qstate->kevent_watermark == 0) {
258 qstate->kevent_watermark = sizeof(int);
260 result = qstate->read_func(qstate, &elem_type, sizeof(int));
261 if (result != sizeof(int)) {
262 TRACE_OUT(on_rw_mapper);
/* Only plain read/write requests may follow on an established session. */
267 case CET_WRITE_REQUEST:
268 qstate->kevent_watermark = sizeof(size_t);
269 qstate->process_func = on_write_request_read1;
271 case CET_READ_REQUEST:
272 qstate->kevent_watermark = sizeof(size_t);
273 qstate->process_func = on_read_request_read1;
276 TRACE_OUT(on_rw_mapper);
281 TRACE_OUT(on_rw_mapper);
286 * The default query_destroy function
289 on_query_destroy(struct query_state *qstate)
292 TRACE_IN(on_query_destroy);
/* Release both communication elements owned by this query state. */
293 finalize_comm_element(&qstate->response);
294 finalize_comm_element(&qstate->request);
295 TRACE_OUT(on_query_destroy);
299 * The functions below are used to process write requests.
300 * - on_write_request_read1 and on_write_request_read2 read the request itself
301 * - on_write_request_process processes it (if the client requests to
302 * cache the negative result, the on_negative_write_request_process is used)
303 * - on_write_response_write1 sends the response
306 on_write_request_read1(struct query_state *qstate)
308 struct cache_write_request *write_request;
311 TRACE_IN(on_write_request_read1);
/* Stage 1: read the three length fields (entry, key, data sizes). */
312 if (qstate->kevent_watermark == 0)
313 qstate->kevent_watermark = sizeof(size_t) * 3;
315 init_comm_element(&qstate->request, CET_WRITE_REQUEST);
316 write_request = get_cache_write_request(&qstate->request);
318 result = qstate->read_func(qstate, &write_request->entry_length,
320 result += qstate->read_func(qstate,
321 &write_request->cache_key_size, sizeof(size_t));
322 result += qstate->read_func(qstate,
323 &write_request->data_size, sizeof(size_t));
325 if (result != sizeof(size_t) * 3) {
326 TRACE_OUT(on_write_request_read1);
/* Validate client-supplied sizes; data_size == 0 means a negative write. */
330 if (BUFSIZE_INVALID(write_request->entry_length) ||
331 BUFSIZE_INVALID(write_request->cache_key_size) ||
332 (BUFSIZE_INVALID(write_request->data_size) &&
333 (write_request->data_size != 0))) {
334 TRACE_OUT(on_write_request_read1);
/* +1 leaves room for the NUL terminator on the entry name. */
338 write_request->entry = calloc(1,
339 write_request->entry_length + 1);
340 assert(write_request->entry != NULL);
/* The cache key is stored with the eid string prepended. */
342 write_request->cache_key = calloc(1,
343 write_request->cache_key_size +
344 qstate->eid_str_length);
345 assert(write_request->cache_key != NULL);
346 memcpy(write_request->cache_key, qstate->eid_str,
347 qstate->eid_str_length);
349 if (write_request->data_size != 0) {
350 write_request->data = calloc(1,
351 write_request->data_size);
352 assert(write_request->data != NULL);
/* Stage 2 fires once all variable-length payloads are readable. */
355 qstate->kevent_watermark = write_request->entry_length +
356 write_request->cache_key_size +
357 write_request->data_size;
358 qstate->process_func = on_write_request_read2;
361 TRACE_OUT(on_write_request_read1);
/*
 * Stage 2 of the write request: reads the entry name, the cache key
 * (after the pre-filled eid prefix) and, if present, the data payload,
 * then routes to the normal or negative write processor.
 */
366 on_write_request_read2(struct query_state *qstate)
368 struct cache_write_request *write_request;
371 TRACE_IN(on_write_request_read2);
372 write_request = get_cache_write_request(&qstate->request);
374 result = qstate->read_func(qstate, write_request->entry,
375 write_request->entry_length);
/* Key bytes land right after the eid prefix written in stage 1. */
376 result += qstate->read_func(qstate, write_request->cache_key +
377 qstate->eid_str_length, write_request->cache_key_size);
378 if (write_request->data_size != 0)
379 result += qstate->read_func(qstate, write_request->data,
380 write_request->data_size);
382 if (result != (ssize_t)qstate->kevent_watermark) {
383 TRACE_OUT(on_write_request_read2);
/* From here on the key size includes the eid prefix. */
386 write_request->cache_key_size += qstate->eid_str_length;
388 qstate->kevent_watermark = 0;
/* data_size == 0 is the client's way of requesting a negative-cache write. */
389 if (write_request->data_size != 0)
390 qstate->process_func = on_write_request_process;
392 qstate->process_func = on_negative_write_request_process;
393 TRACE_OUT(on_write_request_read2);
/*
 * Processes a positive write request: validates the target configuration
 * entry, writes the payload into its positive cache entry and prepares
 * the error-code response.
 */
398 on_write_request_process(struct query_state *qstate)
400 struct cache_write_request *write_request;
401 struct cache_write_response *write_response;
404 TRACE_IN(on_write_request_process);
405 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
406 write_response = get_cache_write_response(&qstate->response);
407 write_request = get_cache_write_request(&qstate->request);
409 qstate->config_entry = configuration_find_entry(
410 s_configuration, write_request->entry);
/* Unknown entry name -> ENOENT. */
412 if (qstate->config_entry == NULL) {
413 write_response->error_code = ENOENT;
415 LOG_ERR_2("write_request", "can't find configuration"
416 " entry '%s'. aborting request", write_request->entry);
/* Disabled entry -> EACCES. */
420 if (qstate->config_entry->enabled == 0) {
421 write_response->error_code = EACCES;
423 LOG_ERR_2("write_request",
424 "configuration entry '%s' is disabled",
425 write_request->entry);
/* Self-lookup entries are read-only from the client's view -> EOPNOTSUPP. */
429 if (qstate->config_entry->perform_actual_lookups != 0) {
430 write_response->error_code = EOPNOTSUPP;
432 LOG_ERR_2("write_request",
433 "entry '%s' performs lookups by itself: "
434 "can't write to it", write_request->entry);
/* Resolve the positive cache entry under the configuration read lock. */
438 configuration_lock_rdlock(s_configuration);
439 c_entry = find_cache_entry(s_cache,
440 qstate->config_entry->positive_cache_params.cep.entry_name);
441 configuration_unlock(s_configuration);
442 if (c_entry != NULL) {
443 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
444 qstate->config_entry->positive_cache_entry = c_entry;
445 write_response->error_code = cache_write(c_entry,
446 write_request->cache_key,
447 write_request->cache_key_size,
449 write_request->data_size);
450 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
/* Adopt the entry's per-query timeout when one is configured. */
452 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
453 (qstate->config_entry->common_query_timeout.tv_usec != 0))
454 memcpy(&qstate->timeout,
455 &qstate->config_entry->common_query_timeout,
456 sizeof(struct timeval));
459 write_response->error_code = -1;
/* Switch the kevent to write mode for the response stage. */
462 qstate->kevent_filter = EVFILT_WRITE;
463 qstate->kevent_watermark = sizeof(int);
464 qstate->process_func = on_write_response_write1;
466 TRACE_OUT(on_write_request_process);
/*
 * Processes a negative write request (data_size == 0): records the key in
 * the entry's negative cache with a fixed one-byte placeholder payload,
 * mirroring on_write_request_process otherwise.
 */
471 on_negative_write_request_process(struct query_state *qstate)
473 struct cache_write_request *write_request;
474 struct cache_write_response *write_response;
477 TRACE_IN(on_negative_write_request_process);
478 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
479 write_response = get_cache_write_response(&qstate->response);
480 write_request = get_cache_write_request(&qstate->request);
482 qstate->config_entry = configuration_find_entry (
483 s_configuration, write_request->entry);
485 if (qstate->config_entry == NULL) {
486 write_response->error_code = ENOENT;
488 LOG_ERR_2("negative_write_request",
489 "can't find configuration"
490 " entry '%s'. aborting request", write_request->entry);
494 if (qstate->config_entry->enabled == 0) {
495 write_response->error_code = EACCES;
497 LOG_ERR_2("negative_write_request",
498 "configuration entry '%s' is disabled",
499 write_request->entry);
503 if (qstate->config_entry->perform_actual_lookups != 0) {
504 write_response->error_code = EOPNOTSUPP;
506 LOG_ERR_2("negative_write_request",
507 "entry '%s' performs lookups by itself: "
508 "can't write to it", write_request->entry);
/* Negative writes additionally require matching euid/egid when enabled. */
511 #ifdef NS_NSCD_EID_CHECKING
512 if (check_query_eids(qstate) != 0) {
513 write_response->error_code = EPERM;
519 configuration_lock_rdlock(s_configuration);
520 c_entry = find_cache_entry(s_cache,
521 qstate->config_entry->negative_cache_params.cep.entry_name);
522 configuration_unlock(s_configuration);
523 if (c_entry != NULL) {
524 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
525 qstate->config_entry->negative_cache_entry = c_entry;
/* The file-scope negative_data byte is used as the stored payload. */
526 write_response->error_code = cache_write(c_entry,
527 write_request->cache_key,
528 write_request->cache_key_size,
530 sizeof(negative_data));
531 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
533 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
534 (qstate->config_entry->common_query_timeout.tv_usec != 0))
535 memcpy(&qstate->timeout,
536 &qstate->config_entry->common_query_timeout,
537 sizeof(struct timeval));
539 write_response->error_code = -1;
/* Proceed to send the int error code back to the client. */
542 qstate->kevent_filter = EVFILT_WRITE;
543 qstate->kevent_watermark = sizeof(int);
544 qstate->process_func = on_write_response_write1;
546 TRACE_OUT(on_negative_write_request_process);
/*
 * Sends the write-response error code to the client, then resets the
 * query state so on_rw_mapper can handle the next request on this session.
 */
551 on_write_response_write1(struct query_state *qstate)
553 struct cache_write_response *write_response;
556 TRACE_IN(on_write_response_write1);
557 write_response = get_cache_write_response(&qstate->response);
558 result = qstate->write_func(qstate, &write_response->error_code,
560 if (result != sizeof(int)) {
561 TRACE_OUT(on_write_response_write1);
/* Release the request/response storage before the next round-trip. */
565 finalize_comm_element(&qstate->request);
566 finalize_comm_element(&qstate->response);
/* Back to read mode, waiting for the next request-type int. */
568 qstate->kevent_watermark = sizeof(int);
569 qstate->kevent_filter = EVFILT_READ;
570 qstate->process_func = on_rw_mapper;
572 TRACE_OUT(on_write_response_write1);
577 * The functions below are used to process read requests.
578 * - on_read_request_read1 and on_read_request_read2 read the request itself
579 * - on_read_request_process processes it
580 * - on_read_response_write1 and on_read_response_write2 send the response
583 on_read_request_read1(struct query_state *qstate)
585 struct cache_read_request *read_request;
588 TRACE_IN(on_read_request_read1);
/* Stage 1: read the two length fields (entry name and key sizes). */
589 if (qstate->kevent_watermark == 0)
590 qstate->kevent_watermark = sizeof(size_t) * 2;
592 init_comm_element(&qstate->request, CET_READ_REQUEST);
593 read_request = get_cache_read_request(&qstate->request);
595 result = qstate->read_func(qstate,
596 &read_request->entry_length, sizeof(size_t));
597 result += qstate->read_func(qstate,
598 &read_request->cache_key_size, sizeof(size_t));
600 if (result != sizeof(size_t) * 2) {
601 TRACE_OUT(on_read_request_read1);
/* Reject out-of-range, client-supplied sizes. */
605 if (BUFSIZE_INVALID(read_request->entry_length) ||
606 BUFSIZE_INVALID(read_request->cache_key_size)) {
607 TRACE_OUT(on_read_request_read1);
/* +1 for the NUL terminator on the entry name. */
611 read_request->entry = calloc(1,
612 read_request->entry_length + 1);
613 assert(read_request->entry != NULL);
/* The cache key is stored with the eid string prepended. */
615 read_request->cache_key = calloc(1,
616 read_request->cache_key_size +
617 qstate->eid_str_length);
618 assert(read_request->cache_key != NULL);
619 memcpy(read_request->cache_key, qstate->eid_str,
620 qstate->eid_str_length);
/* Stage 2 fires once both variable-length payloads are readable. */
622 qstate->kevent_watermark = read_request->entry_length +
623 read_request->cache_key_size;
624 qstate->process_func = on_read_request_read2;
627 TRACE_OUT(on_read_request_read1);
/*
 * Stage 2 of the read request: reads the entry name and the cache key
 * (after the pre-filled eid prefix), then hands off to the processor.
 */
632 on_read_request_read2(struct query_state *qstate)
634 struct cache_read_request *read_request;
637 TRACE_IN(on_read_request_read2);
638 read_request = get_cache_read_request(&qstate->request);
640 result = qstate->read_func(qstate, read_request->entry,
641 read_request->entry_length);
642 result += qstate->read_func(qstate,
643 read_request->cache_key + qstate->eid_str_length,
644 read_request->cache_key_size);
646 if (result != (ssize_t)qstate->kevent_watermark) {
647 TRACE_OUT(on_read_request_read2);
/* From here on the key size includes the eid prefix. */
650 read_request->cache_key_size += qstate->eid_str_length;
652 qstate->kevent_watermark = 0;
653 qstate->process_func = on_read_request_process;
655 TRACE_OUT(on_read_request_read2);
/*
 * Processes a read request. Lookup order:
 *   1. positive cache; on a "found, need bigger buffer" (-2) result the
 *      data buffer is allocated and the read retried;
 *   2. negative cache, if the positive lookup missed (-1);
 *   3. when the entry performs actual lookups itself and both caches
 *      missed, the configured lookup agent is invoked and its result is
 *      written back into the positive or negative cache.
 * Finally the response/error code and the kevent write stage are set up.
 * NOTE(review): this excerpt is gappy -- several cache_read/cache_write
 * argument lines and control-flow braces are not visible here.
 */
660 on_read_request_process(struct query_state *qstate)
662 struct cache_read_request *read_request;
663 struct cache_read_response *read_response;
664 cache_entry c_entry, neg_c_entry;
666 struct agent *lookup_agent;
667 struct common_agent *c_agent;
670 TRACE_IN(on_read_request_process);
671 init_comm_element(&qstate->response, CET_READ_RESPONSE);
672 read_response = get_cache_read_response(&qstate->response);
673 read_request = get_cache_read_request(&qstate->request);
675 qstate->config_entry = configuration_find_entry(
676 s_configuration, read_request->entry);
677 if (qstate->config_entry == NULL) {
678 read_response->error_code = ENOENT;
680 LOG_ERR_2("read_request",
681 "can't find configuration "
682 "entry '%s'. aborting request", read_request->entry);
686 if (qstate->config_entry->enabled == 0) {
687 read_response->error_code = EACCES;
689 LOG_ERR_2("read_request",
690 "configuration entry '%s' is disabled",
691 read_request->entry);
696 * if we perform lookups by ourselves, then we don't need to separate
697 * cache entries by euid and egid
699 if (qstate->config_entry->perform_actual_lookups != 0)
700 memset(read_request->cache_key, 0, qstate->eid_str_length);
702 #ifdef NS_NSCD_EID_CHECKING
703 if (check_query_eids(qstate) != 0) {
704 /* if the lookup is not self-performing, we check for clients euid/egid */
705 read_response->error_code = EPERM;
/* Resolve both cache entries under the configuration read lock. */
711 configuration_lock_rdlock(s_configuration);
712 c_entry = find_cache_entry(s_cache,
713 qstate->config_entry->positive_cache_params.cep.entry_name);
714 neg_c_entry = find_cache_entry(s_cache,
715 qstate->config_entry->negative_cache_params.cep.entry_name);
716 configuration_unlock(s_configuration);
717 if ((c_entry != NULL) && (neg_c_entry != NULL)) {
718 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
719 qstate->config_entry->positive_cache_entry = c_entry;
/* First probe with a NULL buffer just to learn the data size. */
720 read_response->error_code = cache_read(c_entry,
721 read_request->cache_key,
722 read_request->cache_key_size, NULL,
723 &read_response->data_size);
/* -2 means "found but buffer too small": allocate and re-read. */
725 if (read_response->error_code == -2) {
726 read_response->data = malloc(
727 read_response->data_size);
728 assert(read_response->data != NULL);
729 read_response->error_code = cache_read(c_entry,
730 read_request->cache_key,
731 read_request->cache_key_size,
733 &read_response->data_size);
735 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
/* Positive miss (-1): fall back to the negative cache. */
737 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
738 qstate->config_entry->negative_cache_entry = neg_c_entry;
739 if (read_response->error_code == -1) {
740 read_response->error_code = cache_read(neg_c_entry,
741 read_request->cache_key,
742 read_request->cache_key_size, NULL,
743 &read_response->data_size);
745 if (read_response->error_code == -2) {
746 read_response->data = malloc(
747 read_response->data_size);
748 assert(read_response->data != NULL);
749 read_response->error_code = cache_read(neg_c_entry,
750 read_request->cache_key,
751 read_request->cache_key_size,
753 &read_response->data_size);
756 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
/* Both caches missed and the entry does its own lookups: call the agent. */
758 if ((read_response->error_code == -1) &&
759 (qstate->config_entry->perform_actual_lookups != 0)) {
760 free(read_response->data);
761 read_response->data = NULL;
762 read_response->data_size = 0;
764 lookup_agent = find_agent(s_agent_table,
765 read_request->entry, COMMON_AGENT);
767 if ((lookup_agent != NULL) &&
768 (lookup_agent->type == COMMON_AGENT)) {
769 c_agent = (struct common_agent *)lookup_agent;
/* The eid prefix is skipped: the agent sees the raw lookup key. */
770 res = c_agent->lookup_func(
771 read_request->cache_key +
772 qstate->eid_str_length,
773 read_request->cache_key_size -
774 qstate->eid_str_length,
775 &read_response->data,
776 &read_response->data_size);
/* Cache the agent's answer: positive on success, negative on not-found. */
778 if (res == NS_SUCCESS) {
779 read_response->error_code = 0;
780 configuration_lock_entry(
781 qstate->config_entry,
784 read_request->cache_key,
785 read_request->cache_key_size,
787 read_response->data_size);
788 configuration_unlock_entry(
789 qstate->config_entry,
791 } else if ((res == NS_NOTFOUND) ||
792 (res == NS_RETURN)) {
793 configuration_lock_entry(
794 qstate->config_entry,
796 cache_write(neg_c_entry,
797 read_request->cache_key,
798 read_request->cache_key_size,
800 sizeof(negative_data));
801 configuration_unlock_entry(
802 qstate->config_entry,
/* A cached negative answer is a success with an empty payload. */
805 read_response->error_code = 0;
806 read_response->data = NULL;
807 read_response->data_size = 0;
812 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
813 (qstate->config_entry->common_query_timeout.tv_usec != 0))
814 memcpy(&qstate->timeout,
815 &qstate->config_entry->common_query_timeout,
816 sizeof(struct timeval));
818 read_response->error_code = -1;
/* On success the first write stage sends error code + data size. */
821 qstate->kevent_filter = EVFILT_WRITE;
822 if (read_response->error_code == 0)
823 qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
825 qstate->kevent_watermark = sizeof(int);
826 qstate->process_func = on_read_response_write1;
828 TRACE_OUT(on_read_request_process);
/*
 * First write stage of the read response: sends the error code and, on
 * success, the payload size. On success stage two sends the payload;
 * otherwise the query terminates (process_func is cleared).
 */
833 on_read_response_write1(struct query_state *qstate)
835 struct cache_read_response *read_response;
838 TRACE_IN(on_read_response_write1);
839 read_response = get_cache_read_response(&qstate->response);
841 result = qstate->write_func(qstate, &read_response->error_code,
844 if (read_response->error_code == 0) {
845 result += qstate->write_func(qstate, &read_response->data_size,
847 if (result != (ssize_t)qstate->kevent_watermark) {
848 TRACE_OUT(on_read_response_write1);
/* Next watermark is the payload size itself. */
852 qstate->kevent_watermark = read_response->data_size;
853 qstate->process_func = on_read_response_write2;
855 if (result != (ssize_t)qstate->kevent_watermark) {
856 TRACE_OUT(on_read_response_write1);
/* Error path: nothing more to send; end the query. */
860 qstate->kevent_watermark = 0;
861 qstate->process_func = NULL;
864 TRACE_OUT(on_read_response_write1);
/*
 * Second write stage of the read response: sends the payload (if any),
 * then resets the session so on_rw_mapper can take the next request.
 */
869 on_read_response_write2(struct query_state *qstate)
871 struct cache_read_response *read_response;
874 TRACE_IN(on_read_response_write2);
875 read_response = get_cache_read_response(&qstate->response);
876 if (read_response->data_size > 0) {
877 result = qstate->write_func(qstate, read_response->data,
878 read_response->data_size);
879 if (result != (ssize_t)qstate->kevent_watermark) {
880 TRACE_OUT(on_read_response_write2);
/* Release request/response storage before the next round-trip. */
885 finalize_comm_element(&qstate->request);
886 finalize_comm_element(&qstate->response);
/* Back to read mode, waiting for the next request-type int. */
888 qstate->kevent_watermark = sizeof(int);
889 qstate->kevent_filter = EVFILT_READ;
890 qstate->process_func = on_rw_mapper;
891 TRACE_OUT(on_read_response_write2);
896 * The functions below are used to process transform requests.
897 * - on_transform_request_read1 and on_transform_request_read2 read the
899 * - on_transform_request_process processes it
900 * - on_transform_response_write1 sends the response
903 on_transform_request_read1(struct query_state *qstate)
905 struct cache_transform_request *transform_request;
908 TRACE_IN(on_transform_request_read1);
/* Stage 1: read the entry-name length and the transformation type. */
909 if (qstate->kevent_watermark == 0)
910 qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
912 init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
/*
 * NOTE(review): the excerpt shows the get_cache_transform_request()
 * result unassigned here; the full source presumably assigns it to
 * transform_request -- confirm.
 */
914 get_cache_transform_request(&qstate->request);
916 result = qstate->read_func(qstate,
917 &transform_request->entry_length, sizeof(size_t));
918 result += qstate->read_func(qstate,
919 &transform_request->transformation_type, sizeof(int));
921 if (result != sizeof(size_t) + sizeof(int)) {
922 TRACE_OUT(on_transform_request_read1);
/* Only per-user (TT_USER) and global (TT_ALL) transformations exist. */
926 if ((transform_request->transformation_type != TT_USER) &&
927 (transform_request->transformation_type != TT_ALL)) {
928 TRACE_OUT(on_transform_request_read1);
/* Zero entry_length means "apply to every configuration entry". */
932 if (transform_request->entry_length != 0) {
933 if (BUFSIZE_INVALID(transform_request->entry_length)) {
934 TRACE_OUT(on_transform_request_read1);
938 transform_request->entry = calloc(1,
939 transform_request->entry_length + 1);
940 assert(transform_request->entry != NULL);
942 qstate->process_func = on_transform_request_read2;
944 qstate->process_func = on_transform_request_process;
946 qstate->kevent_watermark = transform_request->entry_length;
949 TRACE_OUT(on_transform_request_read1);
/*
 * Stage 2 of the transform request: reads the target entry name, then
 * hands off to the processor.
 */
954 on_transform_request_read2(struct query_state *qstate)
956 struct cache_transform_request *transform_request;
959 TRACE_IN(on_transform_request_read2);
960 transform_request = get_cache_transform_request(&qstate->request);
962 result = qstate->read_func(qstate, transform_request->entry,
963 transform_request->entry_length);
965 if (result != (ssize_t)qstate->kevent_watermark) {
966 TRACE_OUT(on_transform_request_read2);
970 qstate->kevent_watermark = 0;
971 qstate->process_func = on_transform_request_process;
973 TRACE_OUT(on_transform_request_read2);
/*
 * Processes a transform (cache-clear) request:
 *   TT_USER -- clears only the requester's elements (by eid prefix) from
 *              one named entry, or from all entries that don't perform
 *              their own lookups when no entry name was given;
 *   TT_ALL  -- clears entries completely; the visible check suggests this
 *              is restricted to euid 0.
 * NOTE(review): the excerpt is gappy -- the switch header, case labels,
 * breaks and some braces are not visible here.
 */
978 on_transform_request_process(struct query_state *qstate)
980 struct cache_transform_request *transform_request;
981 struct cache_transform_response *transform_response;
982 struct configuration_entry *config_entry;
985 TRACE_IN(on_transform_request_process);
986 init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
987 transform_response = get_cache_transform_response(&qstate->response);
988 transform_request = get_cache_transform_request(&qstate->request);
990 switch (transform_request->transformation_type) {
/* TT_USER with no entry name: partial-clear every applicable entry. */
992 if (transform_request->entry == NULL) {
993 size = configuration_get_entries_size(s_configuration);
994 for (i = 0; i < size; ++i) {
995 config_entry = configuration_get_entry(
/* Entries doing their own lookups share one cache; skip them. */
998 if (config_entry->perform_actual_lookups == 0)
999 clear_config_entry_part(config_entry,
1000 qstate->eid_str, qstate->eid_str_length);
1003 qstate->config_entry = configuration_find_entry(
1004 s_configuration, transform_request->entry);
1006 if (qstate->config_entry == NULL) {
1007 LOG_ERR_2("transform_request",
1008 "can't find configuration"
1009 " entry '%s'. aborting request",
1010 transform_request->entry);
1011 transform_response->error_code = -1;
1015 if (qstate->config_entry->perform_actual_lookups != 0) {
1016 LOG_ERR_2("transform_request",
1017 "can't transform the cache entry %s"
1018 ", because it ised for actual lookups",
1019 transform_request->entry);
1020 transform_response->error_code = -1;
1024 clear_config_entry_part(qstate->config_entry,
1025 qstate->eid_str, qstate->eid_str_length);
/* Full clears appear to be root-only (euid 0). */
1029 if (qstate->euid != 0)
1030 transform_response->error_code = -1;
1032 if (transform_request->entry == NULL) {
1033 size = configuration_get_entries_size(
1035 for (i = 0; i < size; ++i) {
1037 configuration_get_entry(
1038 s_configuration, i));
1041 qstate->config_entry = configuration_find_entry(
1043 transform_request->entry);
1045 if (qstate->config_entry == NULL) {
1046 LOG_ERR_2("transform_request",
1047 "can't find configuration"
1048 " entry '%s'. aborting request",
1049 transform_request->entry);
1050 transform_response->error_code = -1;
1054 clear_config_entry(qstate->config_entry);
/* Unknown transformation type: report failure. */
1059 transform_response->error_code = -1;
1063 qstate->kevent_watermark = 0;
1064 qstate->process_func = on_transform_response_write1;
1065 TRACE_OUT(on_transform_request_process);
/*
 * Sends the transform-response error code, then ends the query
 * (transform requests are one-shot: process_func is cleared).
 */
1070 on_transform_response_write1(struct query_state *qstate)
1072 struct cache_transform_response *transform_response;
1075 TRACE_IN(on_transform_response_write1);
1076 transform_response = get_cache_transform_response(&qstate->response);
1077 result = qstate->write_func(qstate, &transform_response->error_code,
1079 if (result != sizeof(int)) {
1080 TRACE_OUT(on_transform_response_write1);
1084 finalize_comm_element(&qstate->request);
1085 finalize_comm_element(&qstate->response);
1087 qstate->kevent_watermark = 0;
1088 qstate->process_func = NULL;
1089 TRACE_OUT(on_transform_response_write1);
1094 * Checks if the client's euid and egid do not differ from its uid and gid.
1095 * Returns 0 on success, -1 if either pair differs.
1098 check_query_eids(struct query_state *qstate)
1101 return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1105 * Uses the qstate fields to process an "alternate" read - when the buffer is
1106 * too large to be received during one socket read operation
1109 query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1114 TRACE_IN(query_io_buffer_read);
/* No staged buffer means there is nothing to serve from. */
1115 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1118 assert(qstate->io_buffer_p <=
1119 qstate->io_buffer + qstate->io_buffer_size);
/* Serve at most what remains between the cursor and the buffer end. */
1120 remaining = qstate->io_buffer + qstate->io_buffer_size -
1121 qstate->io_buffer_p;
1122 if (nbytes < remaining)
1127 memcpy(buf, qstate->io_buffer_p, result);
1128 qstate->io_buffer_p += result;
/* Buffer drained: free it and fall back to direct socket I/O. */
1130 if (remaining == 0) {
1131 free(qstate->io_buffer);
1132 qstate->io_buffer = NULL;
1134 qstate->write_func = query_socket_write;
1135 qstate->read_func = query_socket_read;
1138 TRACE_OUT(query_io_buffer_read);
1143 * Uses the qstate fields to process an "alternate" write - when the buffer is
1144 * too large to be sent during one socket write operation
1147 query_io_buffer_write(struct query_state *qstate, const void *buf,
1153 TRACE_IN(query_io_buffer_write);
/* No staging buffer means buffered writing is not active. */
1154 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1157 assert(qstate->io_buffer_p <=
1158 qstate->io_buffer + qstate->io_buffer_size);
/* Accept at most what fits between the cursor and the buffer end. */
1159 remaining = qstate->io_buffer + qstate->io_buffer_size -
1160 qstate->io_buffer_p;
1161 if (nbytes < remaining)
1166 memcpy(qstate->io_buffer_p, buf, result);
1167 qstate->io_buffer_p += result;
/*
 * Buffer full: mark alternate I/O, rewind the cursor for draining and
 * restore the direct socket handlers.
 */
1169 if (remaining == 0) {
1170 qstate->use_alternate_io = 1;
1171 qstate->io_buffer_p = qstate->io_buffer;
1173 qstate->write_func = query_socket_write;
1174 qstate->read_func = query_socket_read;
1177 TRACE_OUT(query_io_buffer_write);
1182 * The default "read" function, which reads data directly from socket
1185 query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1189 TRACE_IN(query_socket_read);
/* Once the socket has failed, refuse all further reads. */
1190 if (qstate->socket_failed != 0) {
1191 TRACE_OUT(query_socket_read);
1195 result = read(qstate->sockfd, buf, nbytes);
/* A short or failed read marks the socket as unusable from now on. */
1196 if (result < 0 || (size_t)result < nbytes)
1197 qstate->socket_failed = 1;
1199 TRACE_OUT(query_socket_read);
1204 * The default "write" function, which writes data directly to socket
1207 query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1211 TRACE_IN(query_socket_write);
/* Once the socket has failed, refuse all further writes. */
1212 if (qstate->socket_failed != 0) {
1213 TRACE_OUT(query_socket_write);
1217 result = write(qstate->sockfd, buf, nbytes);
/* A short or failed write marks the socket as unusable from now on. */
1218 if (result < 0 || (size_t)result < nbytes)
1219 qstate->socket_failed = 1;
1221 TRACE_OUT(query_socket_write);
1226 * Initializes the query_state structure by filling it with the default values.
1228 struct query_state *
1229 init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1231 struct query_state *retval;
1233 TRACE_IN(init_query_state);
1234 retval = calloc(1, sizeof(*retval));
1235 assert(retval != NULL);
1237 retval->sockfd = sockfd;
1238 retval->kevent_filter = EVFILT_READ;
1239 retval->kevent_watermark = kevent_watermark;
1241 retval->euid = euid;
1242 retval->egid = egid;
/* Real uid/gid are unknown until on_query_startup receives SCM_CREDS. */
1243 retval->uid = retval->gid = -1;
/* The eid string "<euid>_<egid>_" prefixes per-user cache keys. */
1245 if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1246 retval->egid) == -1) {
1250 retval->eid_str_length = strlen(retval->eid_str);
1252 init_comm_element(&retval->request, CET_UNDEFINED);
1253 init_comm_element(&retval->response, CET_UNDEFINED);
/* Every query begins by receiving credentials and the request type. */
1254 retval->process_func = on_query_startup;
1255 retval->destroy_func = on_query_destroy;
1257 retval->write_func = query_socket_write;
1258 retval->read_func = query_socket_read;
1260 get_time_func(&retval->creation_time);
1261 retval->timeout.tv_sec = s_configuration->query_timeout;
1262 retval->timeout.tv_usec = 0;
1264 TRACE_OUT(init_query_state);
/*
 * Frees the resources owned by a query state (eid string, staged I/O
 * buffer) and delegates the rest to the installed destroy_func.
 */
1269 destroy_query_state(struct query_state *qstate)
1272 TRACE_IN(destroy_query_state);
1273 if (qstate->eid_str != NULL)
1274 free(qstate->eid_str);
1276 if (qstate->io_buffer != NULL)
1277 free(qstate->io_buffer);
/* destroy_func finalizes the comm elements (see on_query_destroy). */
1279 qstate->destroy_func(qstate);
1281 TRACE_OUT(destroy_query_state);