2 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 #include <sys/types.h>
30 #include <sys/event.h>
31 #include <sys/socket.h>
46 #include "mp_ws_query.h"
47 #include "mp_rs_query.h"
48 #include "singletons.h"
/* Dummy payload written into the cache for negative (not-found) results. */
50 static const char negative_data[1] = { 0 };
/* Provided elsewhere; fills in the current time (used for timestamps/timeouts). */
52 extern void get_time_func(struct timeval *);
/* Cache-clearing helpers: whole entry vs. only one user's (eid) portion. */
54 static void clear_config_entry(struct configuration_entry *);
55 static void clear_config_entry_part(struct configuration_entry *,
56 const char *, size_t);
/* Per-connection lifecycle callbacks. */
58 static int on_query_startup(struct query_state *);
59 static void on_query_destroy(struct query_state *);
/* Read-request state machine: read header, read body, process, write reply. */
61 static int on_read_request_read1(struct query_state *);
62 static int on_read_request_read2(struct query_state *);
63 static int on_read_request_process(struct query_state *);
64 static int on_read_response_write1(struct query_state *);
65 static int on_read_response_write2(struct query_state *);
/* Dispatches follow-up read/write requests on a persistent connection. */
67 static int on_rw_mapper(struct query_state *);
/* Transform-request (cache clear/flush) state machine. */
69 static int on_transform_request_read1(struct query_state *);
70 static int on_transform_request_read2(struct query_state *);
71 static int on_transform_request_process(struct query_state *);
72 static int on_transform_response_write1(struct query_state *);
/* Write-request state machine (negative variant caches a "not found" mark). */
74 static int on_write_request_read1(struct query_state *);
75 static int on_write_request_read2(struct query_state *);
76 static int on_negative_write_request_process(struct query_state *);
77 static int on_write_request_process(struct query_state *);
78 static int on_write_response_write1(struct query_state *);
81 * Clears the specified configuration entry (clears the cache for positive
82 * and negative entries) and also for all multipart entries.
85 clear_config_entry(struct configuration_entry *config_entry)
89 TRACE_IN(clear_config_entry);
/* Each cache section is cleared under its own entry lock. */
90 configuration_lock_entry(config_entry, CELT_POSITIVE);
91 if (config_entry->positive_cache_entry != NULL)
92 transform_cache_entry(
93 config_entry->positive_cache_entry,
/* NOTE(review): the transform-type argument line (CTT_CLEAR) appears elided
 * in this copy of the file — confirm against the upstream source. */
95 configuration_unlock_entry(config_entry, CELT_POSITIVE);
97 configuration_lock_entry(config_entry, CELT_NEGATIVE);
98 if (config_entry->negative_cache_entry != NULL)
99 transform_cache_entry(
100 config_entry->negative_cache_entry,
102 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
/* Multipart entries are cleared one by one under the multipart lock. */
104 configuration_lock_entry(config_entry, CELT_MULTIPART);
105 for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
106 transform_cache_entry(
107 config_entry->mp_cache_entries[i],
109 configuration_unlock_entry(config_entry, CELT_MULTIPART);
111 TRACE_OUT(clear_config_entry);
115 * Clears the specified configuration entry by deleting only the elements,
116 * that are owned by the user with specified eid_str.
119 clear_config_entry_part(struct configuration_entry *config_entry,
120 const char *eid_str, size_t eid_str_length)
122 cache_entry *start, *finish, *mp_entry;
123 TRACE_IN(clear_config_entry_part);
/* Positive entries: clear only keys whose left part matches eid_str. */
124 configuration_lock_entry(config_entry, CELT_POSITIVE);
125 if (config_entry->positive_cache_entry != NULL)
126 transform_cache_entry_part(
127 config_entry->positive_cache_entry,
128 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
129 configuration_unlock_entry(config_entry, CELT_POSITIVE);
/* Same partial clear for the negative cache. */
131 configuration_lock_entry(config_entry, CELT_NEGATIVE);
132 if (config_entry->negative_cache_entry != NULL)
133 transform_cache_entry_part(
134 config_entry->negative_cache_entry,
135 CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
136 configuration_unlock_entry(config_entry, CELT_NEGATIVE);
/* Multipart entries for this eid are located as a contiguous range
 * [start, finish) and each is cleared entirely. */
138 configuration_lock_entry(config_entry, CELT_MULTIPART);
139 if (configuration_entry_find_mp_cache_entries(config_entry,
140 eid_str, &start, &finish) == 0) {
141 for (mp_entry = start; mp_entry != finish; ++mp_entry)
142 transform_cache_entry(*mp_entry, CTT_CLEAR);
144 configuration_unlock_entry(config_entry, CELT_MULTIPART);
146 TRACE_OUT(clear_config_entry_part);
150 * This function is assigned to the query_state structure on its creation.
151 * Its main purpose is to receive credentials from the client.
154 on_query_startup(struct query_state *qstate)
/* Control-message buffer sized for one cmsgcred (SCM_CREDS ancillary data). */
158 char pad[CMSG_SPACE(sizeof(struct cmsgcred))];
162 struct cmsgcred *cred;
165 TRACE_IN(on_query_startup);
166 assert(qstate != NULL);
/* Build a recvmsg() header: ancillary space for the credentials and a
 * one-int iovec for the initial request-type element. */
168 memset(&mhdr, 0, sizeof(mhdr));
171 mhdr.msg_control = &cmsg;
172 mhdr.msg_controllen = sizeof(cmsg);
174 memset(&iov, 0, sizeof(iov));
175 iov.iov_base = &elem_type;
176 iov.iov_len = sizeof(elem_type);
178 if (recvmsg(qstate->sockfd, &mhdr, 0) == -1) {
179 TRACE_OUT(on_query_startup);
/* Reject the message unless it carries exactly one well-formed
 * SOL_SOCKET/SCM_CREDS control message. */
183 if (mhdr.msg_controllen != CMSG_SPACE(sizeof(struct cmsgcred)) ||
184 cmsg.hdr.cmsg_len != CMSG_LEN(sizeof(struct cmsgcred)) ||
185 cmsg.hdr.cmsg_level != SOL_SOCKET ||
186 cmsg.hdr.cmsg_type != SCM_CREDS) {
187 TRACE_OUT(on_query_startup);
/* Record the client's real uid/gid from the kernel-supplied credentials. */
191 cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
192 qstate->uid = cred->cmcred_uid;
193 qstate->gid = cred->cmcred_gid;
195 #if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
197 * This check is probably a bit redundant - per-user cache is always separated
198 * by the euid/egid pair
200 if (check_query_eids(qstate) != 0) {
201 #ifdef NS_STRICT_NSCD_EID_CHECKING
202 TRACE_OUT(on_query_startup);
/* Under non-strict checking only plain read/write and multipart session
 * requests are allowed when uid/gid differ from euid/egid. */
205 if ((elem_type != CET_READ_REQUEST) &&
206 (elem_type != CET_MP_READ_SESSION_REQUEST) &&
207 (elem_type != CET_WRITE_REQUEST) &&
208 (elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
209 TRACE_OUT(on_query_startup);
/* Dispatch to the first state-machine step for the request type. */
217 case CET_WRITE_REQUEST:
218 qstate->process_func = on_write_request_read1;
220 case CET_READ_REQUEST:
221 qstate->process_func = on_read_request_read1;
223 case CET_TRANSFORM_REQUEST:
224 qstate->process_func = on_transform_request_read1;
226 case CET_MP_WRITE_SESSION_REQUEST:
227 qstate->process_func = on_mp_write_session_request_read1;
229 case CET_MP_READ_SESSION_REQUEST:
230 qstate->process_func = on_mp_read_session_request_read1;
233 TRACE_OUT(on_query_startup);
237 qstate->kevent_watermark = 0;
238 TRACE_OUT(on_query_startup);
243 * on_rw_mapper is used to process multiple read/write requests during
244 * one connection session. It's never called in the beginning (on query_state
245 * creation) as it does not process the multipart requests and does not
246 * receive credentials
249 on_rw_mapper(struct query_state *qstate)
254 TRACE_IN(on_rw_mapper);
/* First pass: just raise the watermark so a full int can be read. */
255 if (qstate->kevent_watermark == 0) {
256 qstate->kevent_watermark = sizeof(int);
258 result = qstate->read_func(qstate, &elem_type, sizeof(int));
259 if (result != sizeof(int)) {
260 TRACE_OUT(on_rw_mapper);
/* Only plain read/write requests are valid on a persistent connection. */
265 case CET_WRITE_REQUEST:
266 qstate->kevent_watermark = sizeof(size_t);
267 qstate->process_func = on_write_request_read1;
269 case CET_READ_REQUEST:
270 qstate->kevent_watermark = sizeof(size_t);
271 qstate->process_func = on_read_request_read1;
274 TRACE_OUT(on_rw_mapper);
279 TRACE_OUT(on_rw_mapper);
284 * The default query_destroy function
287 on_query_destroy(struct query_state *qstate)
290 TRACE_IN(on_query_destroy);
/* Release any request/response payloads still attached to the state. */
291 finalize_comm_element(&qstate->response);
292 finalize_comm_element(&qstate->request);
293 TRACE_OUT(on_query_destroy);
297 * The functions below are used to process write requests.
298 * - on_write_request_read1 and on_write_request_read2 read the request itself
299 * - on_write_request_process processes it (if the client requests to
300 * cache the negative result, the on_negative_write_request_process is used)
301 * - on_write_response_write1 sends the response
304 on_write_request_read1(struct query_state *qstate)
306 struct cache_write_request *write_request;
309 TRACE_IN(on_write_request_read1);
/* Header is three size_t values: entry name length, key size, data size. */
310 if (qstate->kevent_watermark == 0)
311 qstate->kevent_watermark = sizeof(size_t) * 3;
313 init_comm_element(&qstate->request, CET_WRITE_REQUEST);
314 write_request = get_cache_write_request(&qstate->request);
316 result = qstate->read_func(qstate, &write_request->entry_length,
318 result += qstate->read_func(qstate,
319 &write_request->cache_key_size, sizeof(size_t));
320 result += qstate->read_func(qstate,
321 &write_request->data_size, sizeof(size_t));
323 if (result != sizeof(size_t) * 3) {
324 TRACE_OUT(on_write_request_read1);
/* Validate client-supplied sizes before allocating; data_size of 0 is
 * allowed and means a negative (not-found) write. */
328 if (BUFSIZE_INVALID(write_request->entry_length) ||
329 BUFSIZE_INVALID(write_request->cache_key_size) ||
330 (BUFSIZE_INVALID(write_request->data_size) &&
331 (write_request->data_size != 0))) {
332 TRACE_OUT(on_write_request_read1);
/* +1 leaves room for a NUL terminator on the entry name. */
336 write_request->entry = calloc(1,
337 write_request->entry_length + 1);
338 assert(write_request->entry != NULL);
/* The cache key is prefixed with the client's eid string so each
 * euid/egid pair gets a separate cache namespace. */
340 write_request->cache_key = calloc(1,
341 write_request->cache_key_size +
342 qstate->eid_str_length);
343 assert(write_request->cache_key != NULL);
344 memcpy(write_request->cache_key, qstate->eid_str,
345 qstate->eid_str_length);
347 if (write_request->data_size != 0) {
348 write_request->data = calloc(1,
349 write_request->data_size);
350 assert(write_request->data != NULL);
/* Next step reads the variable-length payload in one pass. */
353 qstate->kevent_watermark = write_request->entry_length +
354 write_request->cache_key_size +
355 write_request->data_size;
356 qstate->process_func = on_write_request_read2;
359 TRACE_OUT(on_write_request_read1);
/* Second step of a write request: reads entry name, cache key and
 * (optionally) the data payload, then routes to the proper processor. */
364 on_write_request_read2(struct query_state *qstate)
366 struct cache_write_request *write_request;
369 TRACE_IN(on_write_request_read2);
370 write_request = get_cache_write_request(&qstate->request);
372 result = qstate->read_func(qstate, write_request->entry,
373 write_request->entry_length);
/* Client key is appended after the eid prefix copied in read1. */
374 result += qstate->read_func(qstate, write_request->cache_key +
375 qstate->eid_str_length, write_request->cache_key_size);
376 if (write_request->data_size != 0)
377 result += qstate->read_func(qstate, write_request->data,
378 write_request->data_size);
380 if (result != (ssize_t)qstate->kevent_watermark) {
381 TRACE_OUT(on_write_request_read2);
/* From here on the key size includes the eid prefix. */
384 write_request->cache_key_size += qstate->eid_str_length;
386 qstate->kevent_watermark = 0;
/* data_size == 0 means the client is caching a negative result. */
387 if (write_request->data_size != 0)
388 qstate->process_func = on_write_request_process;
390 qstate->process_func = on_negative_write_request_process;
391 TRACE_OUT(on_write_request_read2);
/* Stores the client-supplied data in the positive cache for the named
 * configuration entry and prepares an error-code response. */
396 on_write_request_process(struct query_state *qstate)
398 struct cache_write_request *write_request;
399 struct cache_write_response *write_response;
402 TRACE_IN(on_write_request_process);
403 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
404 write_response = get_cache_write_response(&qstate->response);
405 write_request = get_cache_write_request(&qstate->request);
407 qstate->config_entry = configuration_find_entry(
408 s_configuration, write_request->entry);
/* Reject: unknown entry, disabled entry, or entry that performs its own
 * lookups (clients may not write into the latter). */
410 if (qstate->config_entry == NULL) {
411 write_response->error_code = ENOENT;
413 LOG_ERR_2("write_request", "can't find configuration"
414 " entry '%s'. aborting request", write_request->entry);
418 if (qstate->config_entry->enabled == 0) {
419 write_response->error_code = EACCES;
421 LOG_ERR_2("write_request",
422 "configuration entry '%s' is disabled",
423 write_request->entry);
427 if (qstate->config_entry->perform_actual_lookups != 0) {
428 write_response->error_code = EOPNOTSUPP;
430 LOG_ERR_2("write_request",
431 "entry '%s' performs lookups by itself: "
432 "can't write to it", write_request->entry);
/* Look up the positive cache entry under the global read lock, then
 * perform the actual write under the per-entry positive lock. */
436 configuration_lock_rdlock(s_configuration);
437 c_entry = find_cache_entry(s_cache,
438 qstate->config_entry->positive_cache_params.cep.entry_name);
439 configuration_unlock(s_configuration);
440 if (c_entry != NULL) {
441 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
442 qstate->config_entry->positive_cache_entry = c_entry;
443 write_response->error_code = cache_write(c_entry,
444 write_request->cache_key,
445 write_request->cache_key_size,
447 write_request->data_size);
448 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
/* Adopt the entry's custom query timeout if one is configured. */
450 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
451 (qstate->config_entry->common_query_timeout.tv_usec != 0))
452 memcpy(&qstate->timeout,
453 &qstate->config_entry->common_query_timeout,
454 sizeof(struct timeval));
457 write_response->error_code = -1;
/* Switch the kevent to write mode to send the int error code back. */
460 qstate->kevent_filter = EVFILT_WRITE;
461 qstate->kevent_watermark = sizeof(int);
462 qstate->process_func = on_write_response_write1;
464 TRACE_OUT(on_write_request_process);
/* Like on_write_request_process, but records a "not found" marker
 * (negative_data) in the negative cache instead of client data. */
469 on_negative_write_request_process(struct query_state *qstate)
471 struct cache_write_request *write_request;
472 struct cache_write_response *write_response;
475 TRACE_IN(on_negative_write_request_process);
476 init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
477 write_response = get_cache_write_response(&qstate->response);
478 write_request = get_cache_write_request(&qstate->request);
480 qstate->config_entry = configuration_find_entry (
481 s_configuration, write_request->entry);
/* Same validation as positive writes: entry must exist, be enabled,
 * and not perform its own lookups. */
483 if (qstate->config_entry == NULL) {
484 write_response->error_code = ENOENT;
486 LOG_ERR_2("negative_write_request",
487 "can't find configuration"
488 " entry '%s'. aborting request", write_request->entry);
492 if (qstate->config_entry->enabled == 0) {
493 write_response->error_code = EACCES;
495 LOG_ERR_2("negative_write_request",
496 "configuration entry '%s' is disabled",
497 write_request->entry);
501 if (qstate->config_entry->perform_actual_lookups != 0) {
502 write_response->error_code = EOPNOTSUPP;
504 LOG_ERR_2("negative_write_request",
505 "entry '%s' performs lookups by itself: "
506 "can't write to it", write_request->entry);
/* Optional eid enforcement: negative writes require matching euid/egid. */
509 #ifdef NS_NSCD_EID_CHECKING
510 if (check_query_eids(qstate) != 0) {
511 write_response->error_code = EPERM;
/* Write the negative marker under the per-entry negative lock. */
517 configuration_lock_rdlock(s_configuration);
518 c_entry = find_cache_entry(s_cache,
519 qstate->config_entry->negative_cache_params.cep.entry_name);
520 configuration_unlock(s_configuration);
521 if (c_entry != NULL) {
522 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
523 qstate->config_entry->negative_cache_entry = c_entry;
524 write_response->error_code = cache_write(c_entry,
525 write_request->cache_key,
526 write_request->cache_key_size,
528 sizeof(negative_data));
529 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
/* Adopt the entry's custom query timeout if one is configured. */
531 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
532 (qstate->config_entry->common_query_timeout.tv_usec != 0))
533 memcpy(&qstate->timeout,
534 &qstate->config_entry->common_query_timeout,
535 sizeof(struct timeval));
537 write_response->error_code = -1;
/* Prepare to send back the int error code. */
540 qstate->kevent_filter = EVFILT_WRITE;
541 qstate->kevent_watermark = sizeof(int);
542 qstate->process_func = on_write_response_write1;
544 TRACE_OUT(on_negative_write_request_process);
/* Sends the write response (a single int error code), then resets the
 * state machine so the connection can accept further requests. */
549 on_write_response_write1(struct query_state *qstate)
551 struct cache_write_response *write_response;
554 TRACE_IN(on_write_response_write1);
555 write_response = get_cache_write_response(&qstate->response);
556 result = qstate->write_func(qstate, &write_response->error_code,
558 if (result != sizeof(int)) {
559 TRACE_OUT(on_write_response_write1);
/* Free request/response payloads and go back to the rw mapper to wait
 * for the next request type on this connection. */
563 finalize_comm_element(&qstate->request);
564 finalize_comm_element(&qstate->response);
566 qstate->kevent_watermark = sizeof(int);
567 qstate->kevent_filter = EVFILT_READ;
568 qstate->process_func = on_rw_mapper;
570 TRACE_OUT(on_write_response_write1);
575 * The functions below are used to process read requests.
576 * - on_read_request_read1 and on_read_request_read2 read the request itself
577 * - on_read_request_process processes it
578 * - on_read_response_write1 and on_read_response_write2 send the response
581 on_read_request_read1(struct query_state *qstate)
583 struct cache_read_request *read_request;
586 TRACE_IN(on_read_request_read1);
/* Header is two size_t values: entry name length and cache key size. */
587 if (qstate->kevent_watermark == 0)
588 qstate->kevent_watermark = sizeof(size_t) * 2;
590 init_comm_element(&qstate->request, CET_READ_REQUEST);
591 read_request = get_cache_read_request(&qstate->request);
593 result = qstate->read_func(qstate,
594 &read_request->entry_length, sizeof(size_t));
595 result += qstate->read_func(qstate,
596 &read_request->cache_key_size, sizeof(size_t));
598 if (result != sizeof(size_t) * 2) {
599 TRACE_OUT(on_read_request_read1);
/* Validate client-supplied sizes before allocating. */
603 if (BUFSIZE_INVALID(read_request->entry_length) ||
604 BUFSIZE_INVALID(read_request->cache_key_size)) {
605 TRACE_OUT(on_read_request_read1);
/* +1 leaves room for a NUL terminator on the entry name. */
609 read_request->entry = calloc(1,
610 read_request->entry_length + 1);
611 assert(read_request->entry != NULL);
/* Key buffer is prefixed with the client's eid string (per-user cache). */
613 read_request->cache_key = calloc(1,
614 read_request->cache_key_size +
615 qstate->eid_str_length);
616 assert(read_request->cache_key != NULL);
617 memcpy(read_request->cache_key, qstate->eid_str,
618 qstate->eid_str_length);
/* Next step reads entry name + key in one pass. */
620 qstate->kevent_watermark = read_request->entry_length +
621 read_request->cache_key_size;
622 qstate->process_func = on_read_request_read2;
625 TRACE_OUT(on_read_request_read1);
/* Second step of a read request: reads the entry name and the client's
 * cache key (placed after the eid prefix), then hands off to processing. */
630 on_read_request_read2(struct query_state *qstate)
632 struct cache_read_request *read_request;
635 TRACE_IN(on_read_request_read2);
636 read_request = get_cache_read_request(&qstate->request);
638 result = qstate->read_func(qstate, read_request->entry,
639 read_request->entry_length);
640 result += qstate->read_func(qstate,
641 read_request->cache_key + qstate->eid_str_length,
642 read_request->cache_key_size);
644 if (result != (ssize_t)qstate->kevent_watermark) {
645 TRACE_OUT(on_read_request_read2);
/* From here on the key size includes the eid prefix. */
648 read_request->cache_key_size += qstate->eid_str_length;
650 qstate->kevent_watermark = 0;
651 qstate->process_func = on_read_request_process;
653 TRACE_OUT(on_read_request_read2);
/* Core read handler: consults the positive cache, then the negative cache,
 * and — for entries configured with perform_actual_lookups — falls back to
 * an in-daemon nsswitch lookup, caching the result either way. */
658 on_read_request_process(struct query_state *qstate)
660 struct cache_read_request *read_request;
661 struct cache_read_response *read_response;
662 cache_entry c_entry, neg_c_entry;
664 struct agent *lookup_agent;
665 struct common_agent *c_agent;
668 TRACE_IN(on_read_request_process);
669 init_comm_element(&qstate->response, CET_READ_RESPONSE);
670 read_response = get_cache_read_response(&qstate->response);
671 read_request = get_cache_read_request(&qstate->request);
673 qstate->config_entry = configuration_find_entry(
674 s_configuration, read_request->entry);
/* Reject unknown or disabled configuration entries. */
675 if (qstate->config_entry == NULL) {
676 read_response->error_code = ENOENT;
678 LOG_ERR_2("read_request",
679 "can't find configuration "
680 "entry '%s'. aborting request", read_request->entry);
684 if (qstate->config_entry->enabled == 0) {
685 read_response->error_code = EACCES;
687 LOG_ERR_2("read_request",
688 "configuration entry '%s' is disabled",
689 read_request->entry);
694 * if we perform lookups by ourselves, then we don't need to separate
695 * cache entries by euid and egid
697 if (qstate->config_entry->perform_actual_lookups != 0)
698 memset(read_request->cache_key, 0, qstate->eid_str_length)
700 #ifdef NS_NSCD_EID_CHECKING
701 if (check_query_eids(qstate) != 0) {
702 /* if the lookup is not self-performing, we check for clients euid/egid */
703 read_response->error_code = EPERM;
/* Resolve both cache entries under the global read lock. */
709 configuration_lock_rdlock(s_configuration);
710 c_entry = find_cache_entry(s_cache,
711 qstate->config_entry->positive_cache_params.cep.entry_name);
712 neg_c_entry = find_cache_entry(s_cache,
713 qstate->config_entry->negative_cache_params.cep.entry_name);
714 configuration_unlock(s_configuration);
715 if ((c_entry != NULL) && (neg_c_entry != NULL)) {
/* Positive cache: first cache_read probes the size (-2 means "buffer
 * too small, data_size now holds the needed size"), second fetches. */
716 configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
717 qstate->config_entry->positive_cache_entry = c_entry;
718 read_response->error_code = cache_read(c_entry,
719 read_request->cache_key,
720 read_request->cache_key_size, NULL,
721 &read_response->data_size);
723 if (read_response->error_code == -2) {
724 read_response->data = malloc(
725 read_response->data_size);
726 assert(read_response->data != NULL);
727 read_response->error_code = cache_read(c_entry,
728 read_request->cache_key,
729 read_request->cache_key_size,
731 &read_response->data_size);
733 configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);
/* Negative cache: only consulted when the positive lookup missed (-1). */
735 configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
736 qstate->config_entry->negative_cache_entry = neg_c_entry;
737 if (read_response->error_code == -1) {
738 read_response->error_code = cache_read(neg_c_entry,
739 read_request->cache_key,
740 read_request->cache_key_size, NULL,
741 &read_response->data_size);
743 if (read_response->error_code == -2) {
744 read_response->data = malloc(
745 read_response->data_size);
746 assert(read_response->data != NULL);
747 read_response->error_code = cache_read(neg_c_entry,
748 read_request->cache_key,
749 read_request->cache_key_size,
751 &read_response->data_size);
754 configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);
/* Both caches missed and this entry does its own lookups: run the
 * nsswitch agent directly and cache whatever it returns. */
756 if ((read_response->error_code == -1) &&
757 (qstate->config_entry->perform_actual_lookups != 0)) {
758 free(read_response->data);
759 read_response->data = NULL;
760 read_response->data_size = 0;
762 lookup_agent = find_agent(s_agent_table,
763 read_request->entry, COMMON_AGENT);
765 if ((lookup_agent != NULL) &&
766 (lookup_agent->type == COMMON_AGENT)) {
767 c_agent = (struct common_agent *)lookup_agent;
/* The eid prefix is skipped — self-performed lookups use a
 * shared (non per-user) key space; see memset above. */
768 res = c_agent->lookup_func(
769 read_request->cache_key +
770 qstate->eid_str_length,
771 read_request->cache_key_size -
772 qstate->eid_str_length,
773 &read_response->data,
774 &read_response->data_size);
776 if (res == NS_SUCCESS) {
777 read_response->error_code = 0;
778 configuration_lock_entry(
779 qstate->config_entry,
782 read_request->cache_key,
783 read_request->cache_key_size,
785 read_response->data_size);
786 configuration_unlock_entry(
787 qstate->config_entry,
789 } else if ((res == NS_NOTFOUND) ||
790 (res == NS_RETURN)) {
/* Not found: remember the miss in the negative cache and
 * return an empty (but successful) response. */
791 configuration_lock_entry(
792 qstate->config_entry,
794 cache_write(neg_c_entry,
795 read_request->cache_key,
796 read_request->cache_key_size,
798 sizeof(negative_data));
799 configuration_unlock_entry(
800 qstate->config_entry,
803 read_response->error_code = 0;
804 read_response->data = NULL;
805 read_response->data_size = 0;
/* Adopt the entry's custom query timeout if one is configured. */
810 if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
811 (qstate->config_entry->common_query_timeout.tv_usec != 0))
812 memcpy(&qstate->timeout,
813 &qstate->config_entry->common_query_timeout,
814 sizeof(struct timeval));
816 read_response->error_code = -1;
/* Success responses also carry a size_t data length before the data. */
819 qstate->kevent_filter = EVFILT_WRITE;
820 if (read_response->error_code == 0)
821 qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
823 qstate->kevent_watermark = sizeof(int);
824 qstate->process_func = on_read_response_write1;
826 TRACE_OUT(on_read_request_process);
/* Sends the read response header: the int error code, plus the data size
 * when the lookup succeeded; schedules write2 to ship the data itself. */
831 on_read_response_write1(struct query_state *qstate)
833 struct cache_read_response *read_response;
836 TRACE_IN(on_read_response_write1);
837 read_response = get_cache_read_response(&qstate->response);
839 result = qstate->write_func(qstate, &read_response->error_code,
/* Success: also send data_size and continue with the payload. */
842 if (read_response->error_code == 0) {
843 result += qstate->write_func(qstate, &read_response->data_size,
845 if (result != (ssize_t)qstate->kevent_watermark) {
846 TRACE_OUT(on_read_response_write1);
850 qstate->kevent_watermark = read_response->data_size;
851 qstate->process_func = on_read_response_write2;
/* Failure: error code alone was the whole response; end the query. */
853 if (result != (ssize_t)qstate->kevent_watermark) {
854 TRACE_OUT(on_read_response_write1);
858 qstate->kevent_watermark = 0;
859 qstate->process_func = NULL;
862 TRACE_OUT(on_read_response_write1);
/* Sends the read response payload (if any), then resets the state machine
 * back to the rw mapper for further requests on this connection. */
867 on_read_response_write2(struct query_state *qstate)
869 struct cache_read_response *read_response;
872 TRACE_IN(on_read_response_write2);
873 read_response = get_cache_read_response(&qstate->response);
874 if (read_response->data_size > 0) {
875 result = qstate->write_func(qstate, read_response->data,
876 read_response->data_size);
877 if (result != (ssize_t)qstate->kevent_watermark) {
878 TRACE_OUT(on_read_response_write2);
/* Release payloads and wait for the next request type. */
883 finalize_comm_element(&qstate->request);
884 finalize_comm_element(&qstate->response);
886 qstate->kevent_watermark = sizeof(int);
887 qstate->kevent_filter = EVFILT_READ;
888 qstate->process_func = on_rw_mapper;
889 TRACE_OUT(on_read_response_write2);
894 * The functions below are used to process transform (cache clear) requests.
895 * - on_transform_request_read1 and on_transform_request_read2 read the
897 * - on_transform_request_process processes it
898 * - on_transform_response_write1 sends the response
901 on_transform_request_read1(struct query_state *qstate)
903 struct cache_transform_request *transform_request;
906 TRACE_IN(on_transform_request_read1);
/* Header: entry name length (size_t) + transformation type (int). */
907 if (qstate->kevent_watermark == 0)
908 qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
910 init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
/* NOTE(review): the assignment target ("transform_request =") appears to
 * have been lost in this copy — transform_request is used below, so this
 * must be an assignment in the original; confirm against upstream. */
912 get_cache_transform_request(&qstate->request);
914 result = qstate->read_func(qstate,
915 &transform_request->entry_length, sizeof(size_t));
916 result += qstate->read_func(qstate,
917 &transform_request->transformation_type, sizeof(int));
919 if (result != sizeof(size_t) + sizeof(int)) {
920 TRACE_OUT(on_transform_request_read1);
/* Only per-user (TT_USER) and global (TT_ALL) transforms are valid. */
924 if ((transform_request->transformation_type != TT_USER) &&
925 (transform_request->transformation_type != TT_ALL)) {
926 TRACE_OUT(on_transform_request_read1);
/* A zero entry_length means "apply to all entries" — skip read2. */
930 if (transform_request->entry_length != 0) {
931 if (BUFSIZE_INVALID(transform_request->entry_length)) {
932 TRACE_OUT(on_transform_request_read1);
936 transform_request->entry = calloc(1,
937 transform_request->entry_length + 1);
938 assert(transform_request->entry != NULL);
940 qstate->process_func = on_transform_request_read2;
942 qstate->process_func = on_transform_request_process;
944 qstate->kevent_watermark = transform_request->entry_length;
947 TRACE_OUT(on_transform_request_read1);
/* Second step of a transform request: reads the target entry name. */
952 on_transform_request_read2(struct query_state *qstate)
954 struct cache_transform_request *transform_request;
957 TRACE_IN(on_transform_request_read2);
958 transform_request = get_cache_transform_request(&qstate->request);
960 result = qstate->read_func(qstate, transform_request->entry,
961 transform_request->entry_length);
963 if (result != (ssize_t)qstate->kevent_watermark) {
964 TRACE_OUT(on_transform_request_read2);
968 qstate->kevent_watermark = 0;
969 qstate->process_func = on_transform_request_process;
971 TRACE_OUT(on_transform_request_read2);
/* Executes a transform request: clears either the caller's own portion of
 * the cache (TT_USER) or, for root, entire cache entries (TT_ALL), for one
 * named configuration entry or all of them. */
976 on_transform_request_process(struct query_state *qstate)
978 struct cache_transform_request *transform_request;
979 struct cache_transform_response *transform_response;
980 struct configuration_entry *config_entry;
983 TRACE_IN(on_transform_request_process);
984 init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
985 transform_response = get_cache_transform_response(&qstate->response);
986 transform_request = get_cache_transform_request(&qstate->request);
988 switch (transform_request->transformation_type) {
/* TT_USER branch: entry == NULL means "all entries" — clear this
 * user's part of every entry that doesn't do its own lookups. */
990 if (transform_request->entry == NULL) {
991 size = configuration_get_entries_size(s_configuration);
992 for (i = 0; i < size; ++i) {
993 config_entry = configuration_get_entry(
996 if (config_entry->perform_actual_lookups == 0)
997 clear_config_entry_part(config_entry,
998 qstate->eid_str, qstate->eid_str_length);
/* Named entry: look it up and validate before clearing. */
1001 qstate->config_entry = configuration_find_entry(
1002 s_configuration, transform_request->entry);
1004 if (qstate->config_entry == NULL) {
1005 LOG_ERR_2("transform_request",
1006 "can't find configuration"
1007 " entry '%s'. aborting request",
1008 transform_request->entry);
1009 transform_response->error_code = -1;
1013 if (qstate->config_entry->perform_actual_lookups != 0) {
/* NOTE(review): "ised" in this log string is a typo for "is used" —
 * it is runtime output, left untouched here; fix upstream. */
1014 LOG_ERR_2("transform_request",
1015 "can't transform the cache entry %s"
1016 ", because it ised for actual lookups",
1017 transform_request->entry);
1018 transform_response->error_code = -1;
1022 clear_config_entry_part(qstate->config_entry,
1023 qstate->eid_str, qstate->eid_str_length);
/* TT_ALL branch: full-entry clears are restricted to root (euid 0). */
1027 if (qstate->euid != 0)
1028 transform_response->error_code = -1;
1030 if (transform_request->entry == NULL) {
1031 size = configuration_get_entries_size(
1033 for (i = 0; i < size; ++i) {
1035 configuration_get_entry(
1036 s_configuration, i));
1039 qstate->config_entry = configuration_find_entry(
1041 transform_request->entry);
1043 if (qstate->config_entry == NULL) {
1044 LOG_ERR_2("transform_request",
1045 "can't find configuration"
1046 " entry '%s'. aborting request",
1047 transform_request->entry);
1048 transform_response->error_code = -1;
1052 clear_config_entry(qstate->config_entry);
/* Unknown transformation type: report failure. */
1057 transform_response->error_code = -1;
1061 qstate->kevent_watermark = 0;
1062 qstate->process_func = on_transform_response_write1;
1063 TRACE_OUT(on_transform_request_process);
/* Sends the transform response (a single int error code) and ends the
 * query — transform connections are not persistent (process_func = NULL). */
1068 on_transform_response_write1(struct query_state *qstate)
1070 struct cache_transform_response *transform_response;
1073 TRACE_IN(on_transform_response_write1);
1074 transform_response = get_cache_transform_response(&qstate->response);
1075 result = qstate->write_func(qstate, &transform_response->error_code,
1077 if (result != sizeof(int)) {
1078 TRACE_OUT(on_transform_response_write1);
1082 finalize_comm_element(&qstate->request);
1083 finalize_comm_element(&qstate->response);
1085 qstate->kevent_watermark = 0;
1086 qstate->process_func = NULL;
1087 TRACE_OUT(on_transform_response_write1);
1092 * Checks if the client's euid and egid do not differ from its uid and gid.
1093 * Returns 0 on success.
1096 check_query_eids(struct query_state *qstate)
/* -1 when either id pair mismatches, 0 when both match. */
1099 return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ? -1 : 0);
1103 * Uses the qstate fields to process an "alternate" read - when the buffer is
1104 * too large to be received during one socket read operation
1107 query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
1112 TRACE_IN(query_io_buffer_read);
/* No staged buffer means alternate I/O is not active. */
1113 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1116 assert(qstate->io_buffer_p <=
1117 qstate->io_buffer + qstate->io_buffer_size);
/* Serve up to `remaining` bytes from the staged buffer. */
1118 remaining = qstate->io_buffer + qstate->io_buffer_size -
1119 qstate->io_buffer_p;
1120 if (nbytes < remaining)
1125 memcpy(buf, qstate->io_buffer_p, result);
1126 qstate->io_buffer_p += result;
/* Buffer drained: free it and fall back to direct socket I/O. */
1128 if (remaining == 0) {
1129 free(qstate->io_buffer);
1130 qstate->io_buffer = NULL;
1132 qstate->write_func = query_socket_write;
1133 qstate->read_func = query_socket_read;
1136 TRACE_OUT(query_io_buffer_read);
1141 * Uses the qstate fields to process an "alternate" write - when the buffer is
1142 * too large to be sent during one socket write operation
1145 query_io_buffer_write(struct query_state *qstate, const void *buf,
1151 TRACE_IN(query_io_buffer_write);
/* No staging buffer means alternate I/O is not active. */
1152 if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
1155 assert(qstate->io_buffer_p <=
1156 qstate->io_buffer + qstate->io_buffer_size);
/* Accumulate up to `remaining` bytes into the staging buffer. */
1157 remaining = qstate->io_buffer + qstate->io_buffer_size -
1158 qstate->io_buffer_p;
1159 if (nbytes < remaining)
1164 memcpy(qstate->io_buffer_p, buf, result);
1165 qstate->io_buffer_p += result;
/* Buffer full: switch into alternate-I/O flush mode (pointer rewound so
 * the accumulated data can be streamed out) and restore socket funcs. */
1167 if (remaining == 0) {
1168 qstate->use_alternate_io = 1;
1169 qstate->io_buffer_p = qstate->io_buffer;
1171 qstate->write_func = query_socket_write;
1172 qstate->read_func = query_socket_read;
1175 TRACE_OUT(query_io_buffer_write);
1180 * The default "read" function, which reads data directly from socket
1183 query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
1187 TRACE_IN(query_socket_read);
/* Once the socket has failed, refuse further I/O on it. */
1188 if (qstate->socket_failed != 0) {
1189 TRACE_OUT(query_socket_read);
1193 result = read(qstate->sockfd, buf, nbytes);
/* A short or failed read marks the socket as dead — the protocol
 * requires complete reads of each field. */
1194 if (result < 0 || (size_t)result < nbytes)
1195 qstate->socket_failed = 1;
1197 TRACE_OUT(query_socket_read);
1202 * The default "write" function, which writes data directly to socket
1205 query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
1209 TRACE_IN(query_socket_write);
/* Once the socket has failed, refuse further I/O on it. */
1210 if (qstate->socket_failed != 0) {
1211 TRACE_OUT(query_socket_write);
1215 result = write(qstate->sockfd, buf, nbytes);
/* A short or failed write marks the socket as dead. */
1216 if (result < 0 || (size_t)result < nbytes)
1217 qstate->socket_failed = 1;
1219 TRACE_OUT(query_socket_write);
1224 * Initializes the query_state structure by filling it with the default values.
1226 struct query_state *
1227 init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
1229 struct query_state *retval;
1231 TRACE_IN(init_query_state);
1232 retval = calloc(1, sizeof(*retval));
1233 assert(retval != NULL);
1235 retval->sockfd = sockfd;
1236 retval->kevent_filter = EVFILT_READ;
1237 retval->kevent_watermark = kevent_watermark;
/* euid/egid come from the listener; uid/gid stay -1 until the client's
 * SCM_CREDS message is received in on_query_startup(). */
1239 retval->euid = euid;
1240 retval->egid = egid;
1241 retval->uid = retval->gid = -1;
/* eid_str ("<euid>_<egid>_") prefixes every cache key for this client. */
1243 if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
1244 retval->egid) == -1) {
1248 retval->eid_str_length = strlen(retval->eid_str);
1250 init_comm_element(&retval->request, CET_UNDEFINED);
1251 init_comm_element(&retval->response, CET_UNDEFINED);
/* Every new connection starts in the credential-receiving state. */
1252 retval->process_func = on_query_startup;
1253 retval->destroy_func = on_query_destroy;
1255 retval->write_func = query_socket_write;
1256 retval->read_func = query_socket_read;
1258 get_time_func(&retval->creation_time);
1259 retval->timeout.tv_sec = s_configuration->query_timeout;
1260 retval->timeout.tv_usec = 0;
1262 TRACE_OUT(init_query_state);
/* Frees everything owned by a query_state: eid string, any staged I/O
 * buffer, and whatever the per-query destroy callback releases. */
1267 destroy_query_state(struct query_state *qstate)
1270 TRACE_IN(destroy_query_state);
1271 if (qstate->eid_str != NULL)
1272 free(qstate->eid_str);
1274 if (qstate->io_buffer != NULL)
1275 free(qstate->io_buffer);
/* Delegates request/response cleanup (default: on_query_destroy). */
1277 qstate->destroy_func(qstate);
1279 TRACE_OUT(destroy_query_state);