2 static char sfs_c_bioSid[] = "@(#)sfs_c_bio.c 2.1 97/10/23";
6 * Copyright (c) 1992-1997,2001 by Standard Performance Evaluation Corporation
8 * Standard Performance Evaluation Corporation (SPEC)
9 * 6585 Merchant Place, Suite 100
12 * This product contains benchmarks acquired from several sources who
13 * understand and agree with SPEC's goal of creating fair and objective
14 * benchmarks to measure computer performance.
16 * This copyright notice is placed here only to protect SPEC in the
17 * event the source is misused in any manner that is contrary to the
18 * spirit, the goals and the intent of SPEC.
20 * The source code is provided to the user or company under the license
21 * agreement for the SPEC Benchmark Suite for this product.
25 * ---------------------- sfs_c_bio.c ---------------------
27 * Routines that attempt to simulate biod behavior
29 * The routines contained here model biod behavior. Simply call
30 * biod_init() to replace regular calls to op_read() and op_write()
31 * with calls to op_biod_read() and op_biod_write(). The variables
32 * max_out_writes and max_out_reads control the maximum number of
33 * outstanding writes and reads respectively.
36 * int biod_init(int, int);
37 * void biod_turn_on(void);
38 * void op_biod_write(int, int, int);
39 * void op_biod_read(int);
42 * uint32_t biod_clnt_call(CLIENT *, uint32_t,
44 * struct biod_req *biod_get_reply(CLIENT *, xdrproc_t,
45 * void *, struct timeval *);
46 * int biod_poll_wait(CLIENT *, uint32_t);
50 * History now kept in SCCS
51 * 03-Mar-92 0.1.0 Corbin
56 * ------------------------- Include Files -------------------------
67 #include <sys/types.h>
72 #include "sfs_c_def.h"
73 #include "rfs_c_def.h"
76 * Information associated with outstanding read/write requests
80 uint32_t xid; /* RPC transmission ID */
81 bool_t in_use; /* Indicates if the entry is in use */
82 int dep_tab_index; /* corresponding index in dep_tab */
83 unsigned int count; /* Count saved for Dump routines */
84 unsigned int offset; /* Offset saved for Dump routines */
85 struct ladtime start; /* Time RPC call was made */
86 struct ladtime stop; /* Time RPC reply was received */
87 struct ladtime timeout; /* Time RPC call will time out */
92 * ---------------------- Static Declarations ----------------------
/*
 * NOTE(review): this listing elides lines (the embedded original line
 * numbers jump); verify any edit against the full source file.
 */
/* Caps on concurrently outstanding async WRITE / READ RPCs. */
95 static int max_out_writes;
96 static int max_out_reads;
/* Size of the request-slot table below; set in biod_init(). */
97 int max_biod_reqs = 0;
/* Table of outstanding-request slots, calloc'ed in biod_init(). */
98 struct biod_req *biod_reqp;
100 /* forward definitions for local functions */
101 extern uint32_t biod_clnt_call(CLIENT *, uint32_t, xdrproc_t, void *);
102 static struct biod_req *biod_get_reply(CLIENT *, xdrproc_t,
103 void *, struct timeval *);
104 extern int biod_poll_wait(CLIENT *, uint32_t);
/* Replacements installed into Ops[WRITE]/Ops[READ] by biod_init(). */
106 static int op_biod_write(int, int, int);
107 static int op_biod_read(int);
110 * ---------------------- BIOD Support Routines ----------------------
116 * This function is called during the initialization phase. It performs
117 * the following tasks:
118 * - Allocate memory to hold outstanding biod request information
120 * Returns 0 for OK, -1 for failure
/*
 * NOTE(review): the original per-argument sizing (commented out below,
 * "RFS") was replaced by a fixed MAX_OUTSTANDING_REQ table size.
 * Presumably max_out_writes/max_out_reads are set elsewhere now — verify.
 */
127 // RFS max_out_writes = MAXIMUM(1, out_writes);
128 // RFS max_out_reads = MAXIMUM(1, out_reads);
129 // RFS max_biod_reqs = MAXIMUM(out_writes, out_reads);
130 max_biod_reqs = MAX_OUTSTANDING_REQ; // RFS
/* Zero-initialized slot table: calloc leaves every in_use == FALSE. */
132 biod_reqp = (struct biod_req *) calloc(max_biod_reqs,
133 sizeof (struct biod_req));
134 if (biod_reqp == (struct biod_req *)0) {
135 (void) fprintf(stderr, "%s: biod_init calloc failed.\n", sfs_Myname);
136 (void) fflush(stderr);
146 * - Change the operation functions for reads and writes to use the
147 * biod routines. This step should be done last to allow callers
148 * to still run with the old op functions if the biod initialization
/* Install the async entry points; must be the last step (see above). */
154 Ops[WRITE].funct = op_biod_write;
155 Ops[READ].funct = op_biod_read;
163 * This function is called during the termination phase to free any resources
164 * allocated by the biod_init() routine. It performs the following tasks:
165 * - Frees memory associated with outstanding biod request information
166 * - Frees the biod client handle
178 * Perform an RPC biod style write operation of length 'xfer_size'.
179 * If 'append_flag' is true, then write the data to the end of the file.
/*
 * op_biod_write: emulate biod behavior for WRITE — keep up to
 * max_out_writes async NFS WRITE RPCs (v2 or v3) in flight, in
 * Bytes_per_block chunks, reaping replies and retiring timed-out
 * requests until 'xfer_size' bytes have been covered.
 * NOTE(review): this listing elides lines (original numbering jumps);
 * comments describe only the visible code.
 */
187 sfs_op_type *op_ptr; /* per operation info */
188 static char *buf = NULL; /* the data buffer */
189 unsigned int size; /* size of data write */
191 attrstat reply2; /* the reply */
193 WRITE3res reply3; /* the reply */
195 struct ladtime curr_time;
196 struct ladtime tmp_time;
197 struct ladtime call_timeout;
198 struct biod_req *reqp;
199 int ret; /* ret val == call success */
200 int num_out_reqs; /* # of outstanding writes */
204 static int calls = 0;
/* Only NFS v2 (NFS_VERSION) and v3 are supported. */
208 if (nfs_version != NFS_VERSION && nfs_version != NFS_V3)
212 * Initialize write buffer to known value
215 buf = init_write_buffer();
220 * For now we treat DATA_SYNC to be the same as FILE_SYNC.
221 * If it is not a V3 op then it must always be stable
223 if (stab_flag == DATA_SYNC || nfs_version != NFS_V3)
224 stab_flag = FILE_SYNC;
226 op_ptr = &Ops[WRITE];
229 /* set up the arguments */
/* Both v2 and v3 argument structs are primed; only one is used. */
230 (void) memmove((char *) &args2.file, (char *) &Cur_file_ptr->fh2,
232 (void) memmove((char *) &args3.file, (char *) &Cur_file_ptr->fh3,
235 args2.beginoffset = 0; /* unused */
/* Append writes start at current EOF; otherwise pick a random
 * block-aligned offset that keeps the transfer inside the file. */
237 if (append_flag == 1) {
238 args2.offset = Cur_file_ptr->attributes2.size;
239 args3.offset = Cur_file_ptr->attributes3.size;
241 if (fh_size(Cur_file_ptr) > xfer_size) {
242 offset = Bytes_per_block * (sfs_random() %
243 (((fh_size(Cur_file_ptr) - xfer_size)
244 / Bytes_per_block) + 1));
245 args2.offset = offset;
246 args3.offset._p._u = 0;
247 args3.offset._p._l = offset;
250 args3.offset._p._u = args3.offset._p._l = 0;
254 size = Bytes_per_block;
255 args2.totalcount = size; /* unused */
256 args2.data.data_len = size;
257 args2.data.data_val = buf;
258 args3.data.data_len = size;
259 args3.data.data_val = buf;
261 args3.stable = stab_flag;
263 /* Calculate the number of NFS writes required */
264 max_cnt = xfer_size / Bytes_per_block;
265 if ((xfer_size % Bytes_per_block) != 0) {
269 /* check our stats to see if this would overflow */
/* Clamp the request count so good_calls never exceeds target_calls. */
271 if (op_ptr->target_calls > 0 &&
272 (op_ptr->results.good_calls + max_cnt) > op_ptr->target_calls) {
273 max_cnt = op_ptr->target_calls - op_ptr->results.good_calls;
277 if (DEBUG_CHILD_OPS) {
278 (void) fprintf(stderr, "write: %d buffers xfer_size %d\n",
280 (void) fflush(stderr);
283 /* Mark all request slots as not in use */
284 for (reqp = biod_reqp, i = 0; i < max_biod_reqs; i++, reqp++) {
285 reqp->in_use = FALSE;
/* Warmup and earlier phases use the more generous Init timeout. */
288 if (Current_test_phase < Warmup_phase) {
289 call_timeout.sec = Nfs_timers[Init].tv_sec;
290 call_timeout.usec = Nfs_timers[Init].tv_usec;
292 call_timeout.sec = Nfs_timers[op_ptr->call_class].tv_sec;
293 call_timeout.usec = Nfs_timers[op_ptr->call_class].tv_usec;
296 /* capture length for possible dump */
297 Dump_length = fh_size(Cur_file_ptr);
299 /* make the call(s) now */
/* Main pump loop: alternate between issuing calls and reaping replies
 * until all data is sent and no requests remain outstanding. */
301 while (xfer_size > 0 || num_out_reqs > 0) {
303 * Send out calls async until either the maximum number of outstanding
304 * requests has been reached or there are no more requests to make.
306 while (num_out_reqs < max_out_writes && xfer_size > 0) {
308 /* find an empty write request slot */
309 for (reqp = biod_reqp, i = 0; i < max_out_writes; i++, reqp++) {
310 if (reqp->in_use == FALSE) {
/* Final partial chunk: shrink the request to the remainder. */
315 if (xfer_size < size) {
317 args2.data.data_len = xfer_size;
318 args2.totalcount = xfer_size; /* unused */
319 args3.data.data_len = xfer_size;
320 args3.count = xfer_size;
/* Timestamp the call; xid == 0 means the async send failed. */
324 sfs_gettime(&reqp->start);
325 if (nfs_version == NFS_V3) {
326 reqp->xid = biod_clnt_call(NFS_client,
327 (uint32_t)NFSPROC3_WRITE,
328 xdr_WRITE3args, (char *) &args3);
329 if (reqp->xid != 0) {
330 /* capture count and offset for possible dump */
331 reqp->count = args3.data.data_len;
332 reqp->offset = args3.offset._p._l;
333 reqp->timeout = reqp->start;
334 ADDTIME(reqp->timeout, call_timeout);
339 if (nfs_version == NFS_VERSION) {
340 reqp->xid = biod_clnt_call(NFS_client,
341 (uint32_t)NFSPROC_WRITE,
342 xdr_write, (char *) &args2);
343 if (reqp->xid != 0) {
344 /* capture count and offset for possible dump */
345 reqp->count = args2.data.data_len;
346 reqp->offset = args2.offset;
347 reqp->timeout = reqp->start;
348 ADDTIME(reqp->timeout, call_timeout);
353 if (DEBUG_CHILD_BIOD) {
354 (void) fprintf (stderr,
355 "[%d]:Biod write started xid %x start (%d.%06d) timeo (%d.%06d)\n",
357 reqp->start.sec, reqp->start.usec,
358 reqp->timeout.sec, reqp->timeout.usec);
/* Advance both v2 and v3 offsets for the next chunk. */
361 args2.offset += size;
362 args3.offset._p._l += size;
/* Zero-timeout poll: bail out of the send loop early if replies
 * are already waiting on the socket. */
363 if (biod_poll_wait(NFS_client, 0) > 0) {
366 } /* while can make an async call */
369 * Process replies while there is data on the socket buffer.
370 * Just do polls on the select, no sleeping occurs in this loop.
373 error = biod_poll_wait(NFS_client, 0);
376 if (errno == EINTR) {
380 if (DEBUG_CHILD_BIOD) {
381 (void) fprintf(stderr, "%s:[%d]: biod_poll_wait error\n",
383 (void) fflush(stderr);
392 if (nfs_version == NFS_VERSION)
393 reqp = biod_get_reply(NFS_client, xdr_write,
395 &Nfs_timers[op_ptr->call_class]);
396 if (nfs_version == NFS_V3)
397 reqp = biod_get_reply(NFS_client, xdr_WRITE3res,
399 &Nfs_timers[op_ptr->call_class]);
402 * If biod_get_reply returns NULL then we got an RPC
403 * level error, probably a dropped fragment or the
404 * remains of a previous partial request.
406 if (reqp == (struct biod_req *)NULL) {
412 * We have a valid response, check if procedure completed
415 if ((nfs_version == NFS_VERSION &&
416 reply2.status == NFS_OK) ||
417 (nfs_version == NFS_V3 && reply3.status == NFS3_OK)) {
418 Cur_file_ptr->state = Exists;
420 * In updating attributes we may get replies out
421 * of order. We blindly update the attributes
422 * which may cause old attributes to be stored.
423 * XXX We should check for old attributes.
425 if (nfs_version == NFS_VERSION)
426 Cur_file_ptr->attributes2 =
427 reply2.attrstat_u.attributes;
428 if (nfs_version == NFS_V3)
429 Cur_file_ptr->attributes3 =
430 reply3.res_u.ok.file_wcc.after.attr;
431 if (DEBUG_CHILD_RPC) {
432 (void) fprintf(stderr,
433 "%s: WRITE %s %d bytes offset %d \n",
434 sfs_Myname, Cur_filename,
435 reqp->count, reqp->offset);
436 (void) fflush(stderr);
439 /* capture count and offset for possible dump */
440 Dump_count = reqp->count;
441 Dump_offset = reqp->offset;
/* Record latency and success, then free the request slot. */
442 sfs_elapsedtime(op_ptr, &reqp->start, &reqp->stop);
443 op_ptr->results.good_calls++;
444 Ops[TOTAL].results.good_calls++;
446 reqp->in_use = FALSE;
448 if (DEBUG_CHILD_BIOD) {
449 (void) fprintf (stderr,
450 "[%d]:Biod write succeded xid %x start (%d.%06d) timeo (%d.%06d) stop (%d.%06d)\n",
452 reqp->start.sec, reqp->start.usec,
453 reqp->timeout.sec, reqp->timeout.usec,
454 reqp->stop.sec, reqp->stop.usec);
/* Reply arrived but the NFS status was an error: count it bad. */
457 op_ptr->results.bad_calls++;
458 Ops[TOTAL].results.bad_calls++;
459 reqp->in_use = FALSE;
461 if (DEBUG_CHILD_BIOD) {
462 (void) fprintf (stderr,
463 "[%d]:Biod write failed xid %x start (%d.%06d) timeo (%d.%06d)\n",
465 reqp->start.sec, reqp->start.usec,
466 reqp->timeout.sec, reqp->timeout.usec);
468 (void) fprintf(stderr,
469 "[%d]:BIOD WRITE FAILED: xid %x",
472 if (nfs_version == NFS_VERSION)
473 (void) fprintf(stderr, " status %d",
475 if (nfs_version == NFS_V3)
476 (void) fprintf(stderr, " status %d",
478 (void) fprintf(stderr, "\n");
483 } while (error > 0 && num_out_reqs > 0);
485 /* Scan for replies that have timed out */
486 if (num_out_reqs > 0) {
487 sfs_gettime(&curr_time);
488 for (reqp = biod_reqp, i = 0; i < max_out_writes; i++, reqp++) {
489 if (reqp->in_use == FALSE) {
/* reqp->timeout < curr_time: the call expired with no reply. */
492 if (reqp->timeout.sec < curr_time.sec ||
493 (reqp->timeout.sec == curr_time.sec &&
494 reqp->timeout.usec < curr_time.usec)) {
496 op_ptr->results.bad_calls++;
497 Ops[TOTAL].results.bad_calls++;
498 reqp->in_use = FALSE;
500 if (DEBUG_CHILD_BIOD) {
501 (void) fprintf (stderr,
502 "[%d]:Biod write timed out %x start (%d.%06d) timeo (%d.%06d) now (%d.%06d)\n",
504 reqp->start.sec, reqp->start.usec,
505 reqp->timeout.sec, reqp->timeout.usec,
506 curr_time.sec, curr_time.usec);
507 if (biod_poll_wait(NFS_client, 0) > 0) {
508 (void) fprintf(stderr,
509 "[%d]:BIOD WRITE TIMEOUT - data on input queue!\n", calls);
517 * We go to sleep waiting for a reply if all the requests have
518 * been sent and there are outstanding requests, or we cannot
519 * send any more requests.
521 if ((xfer_size <= 0 && num_out_reqs > 0) ||
522 num_out_reqs == max_out_writes) {
524 * Find the next outstanding request that will timeout
525 * and take a time differential to use for the poll timeout.
526 * If the differential is less than zero, then we go to the
527 * top of the loop. Note that we are not picky on errors
528 * returned by select, after the sleep we return to the top
529 * of the loop so extensive error/status checking is not
534 for (reqp = biod_reqp, i = 0; i < max_out_writes; i++, reqp++) {
535 if (reqp->in_use == FALSE) {
/* Track the earliest pending timeout (tmp_time.sec==0 = unset). */
538 if (tmp_time.sec == 0 ||
539 (reqp->timeout.sec < tmp_time.sec ||
540 (reqp->timeout.sec == tmp_time.sec &&
541 reqp->timeout.usec < tmp_time.usec))) {
543 tmp_time = reqp->timeout;
546 if (tmp_time.sec == 0 && tmp_time.usec == 0)
548 sfs_gettime(&curr_time);
549 SUBTIME(tmp_time, curr_time);
/* Sleep (in the poll) until the earliest request would expire. */
550 (void) biod_poll_wait(NFS_client,
551 tmp_time.sec * 1000000 + tmp_time.usec);
553 } /* while not done */
557 * If we have not gotten an error and we were asked for an async write
558 * send a commit operation.
560 if (ret && stab_flag != FILE_SYNC)
561 ret += (*Ops[COMMIT].funct)();
565 } /* op_biod_write */
569 * Perform an RPC read operation of length 'xfer_size'
/*
 * op_biod_read: async READ counterpart of op_biod_write — keep up to
 * max_out_reads NFS READ RPCs (v2 or v3) in flight in Bytes_per_block
 * chunks, reaping replies and retiring timed-out requests.
 * NOTE(review): this listing elides lines (original numbering jumps);
 * comments describe only the visible code.
 */
575 sfs_op_type *op_ptr; /* per operation info */
576 int max_cnt; /* packet ctrs */
577 char buf[DEFAULT_MAX_BUFSIZE];/* data buffer */
579 readres reply2; /* the reply */
581 READ3res reply3; /* the reply */
583 struct ladtime curr_time;
584 struct ladtime call_timeout;
585 struct ladtime tmp_time;
586 struct biod_req *reqp;
587 int ret; /* ret val == call success */
588 int num_out_reqs; /* # of outstanding reads */
592 static int calls = 0;
/* Only NFS v2 (NFS_VERSION) and v3 are supported. */
596 if (nfs_version != NFS_VERSION && nfs_version != NFS_V3)
602 /* set up the arguments */
/* Both v2 and v3 argument structs are primed; only one is used. */
603 (void) memmove((char *) &args2.file, (char *) &Cur_file_ptr->fh2,
605 (void) memmove((char *) &args3.file, (char *) &Cur_file_ptr->fh3,
609 * Don't allow a read of less than one block size
611 if (xfer_size < Bytes_per_block)
612 xfer_size = Bytes_per_block;
615 /* Calculate the number of NFS reads required */
616 max_cnt = xfer_size / Bytes_per_block;
617 if ((xfer_size % Bytes_per_block) != 0) {
621 /* check our stats to see if this would overflow */
/* Clamp the request count so good_calls never exceeds target_calls. */
623 if (op_ptr->target_calls > 0 &&
624 (op_ptr->results.good_calls + max_cnt) > op_ptr->target_calls) {
625 max_cnt = op_ptr->target_calls - op_ptr->results.good_calls;
630 args3.offset._p._l = args3.offset._p._u = 0;
633 * randomly choose an offset that is a multiple of the block size
634 * and constrained by making the transfer fit within the file
636 if (fh_size(Cur_file_ptr) > xfer_size) {
637 offset = Bytes_per_block * (sfs_random() %
638 (((fh_size(Cur_file_ptr) - xfer_size)
639 / Bytes_per_block) + 1));
640 args2.offset = offset;
641 args3.offset._p._u = 0;
642 args3.offset._p._l = offset;
645 size = Bytes_per_block;
648 args2.totalcount = size; /* unused */
650 /* Have lower layers fill in the data directly. */
651 reply2.readres_u.reply.data.data_val = buf;
652 reply3.res_u.ok.data.data_val = buf;
654 if (DEBUG_CHILD_OPS) {
655 (void) fprintf(stderr, "read: %d buffers xfer_size %d\n",
657 (void) fflush(stderr);
660 /* Mark all request slots as not in use */
661 for (reqp = biod_reqp, i = 0; i < max_biod_reqs; i++, reqp++) {
662 reqp->in_use = FALSE;
/* Warmup and earlier phases use the more generous Init timeout. */
665 if (Current_test_phase < Warmup_phase) {
666 call_timeout.sec = Nfs_timers[Init].tv_sec;
667 call_timeout.usec = Nfs_timers[Init].tv_usec;
669 call_timeout.sec = Nfs_timers[op_ptr->call_class].tv_sec;
670 call_timeout.usec = Nfs_timers[op_ptr->call_class].tv_usec;
673 /* capture length for possible dump */
674 Dump_length = fh_size(Cur_file_ptr);
676 /* make the call(s) now */
/* Main pump loop: alternate between issuing calls and reaping replies
 * until all data is requested and no requests remain outstanding. */
678 while (xfer_size > 0 || num_out_reqs > 0) {
680 * Send out calls async until either the maximum number of outstanding
681 * requests has been reached or there are no more requests to make.
683 while (num_out_reqs < max_out_reads && xfer_size > 0) {
685 /* find an empty read request slot */
686 for (reqp = biod_reqp, i = 0; i < max_out_reads; i++, reqp++) {
687 if (reqp->in_use == FALSE) {
/* Final partial chunk: shrink the request to the remainder. */
692 if (xfer_size < size) {
694 args2.count = xfer_size;
695 args3.count = xfer_size;
696 args2.totalcount = xfer_size; /* unused */
/* Timestamp the call; xid == 0 means the async send failed. */
700 sfs_gettime(&reqp->start);
701 if (nfs_version == NFS_VERSION) {
702 reqp->xid = biod_clnt_call(NFS_client,
703 (uint32_t)NFSPROC_READ,
704 xdr_read, (char *) &args2);
705 if (reqp->xid != 0) {
706 /* capture count and offset for possible dump */
707 reqp->count = args2.count;
708 reqp->offset = args2.offset;
709 reqp->timeout = reqp->start;
710 ADDTIME(reqp->timeout, call_timeout);
714 } else if (nfs_version == NFS_V3) {
715 reqp->xid = biod_clnt_call(NFS_client,
716 (uint32_t)NFSPROC3_READ,
717 xdr_READ3args, (char *) &args3);
718 if (reqp->xid != 0) {
719 /* capture count and offset for possible dump */
720 reqp->count = args3.count;
721 reqp->offset = args3.offset._p._l;
722 reqp->timeout = reqp->start;
723 ADDTIME(reqp->timeout, call_timeout);
/* Advance both v2 and v3 offsets for the next chunk. */
729 args2.offset += size;
730 args3.offset._p._l += size;
/* Zero-timeout poll: bail out of the send loop early if replies
 * are already waiting on the socket. */
731 if (biod_poll_wait(NFS_client, 0) > 0) {
734 } /* while can make an async call */
737 * Process replies while there is data on the socket buffer.
738 * Just do polls on the select, no sleeping occurs in this loop.
741 error = biod_poll_wait(NFS_client, 0);
744 if (errno == EINTR) {
748 if (DEBUG_CHILD_BIOD) {
749 (void) fprintf(stderr,
750 "%s:[%d]: biod_poll_wait error\n",
752 (void) fflush(stderr);
761 if (nfs_version == NFS_VERSION)
762 reqp = biod_get_reply(NFS_client, xdr_read,
764 &Nfs_timers[op_ptr->call_class]);
765 if (nfs_version == NFS_V3)
766 reqp = biod_get_reply(NFS_client, xdr_READ3res,
768 &Nfs_timers[op_ptr->call_class]);
771 * If biod_get_reply returns NULL then we got an RPC
772 * level error, probably a dropped fragment or the
773 * remains of a previous partial request.
775 if (reqp == (struct biod_req *)NULL) {
781 * We have a valid response, check if procedure completed
784 if ((nfs_version == NFS_VERSION &&
785 reply2.status == NFS_OK) ||
786 (nfs_version == NFS_V3 &&
787 reply3.status == NFS3_OK)) {
788 Cur_file_ptr->state = Exists;
789 if (DEBUG_CHILD_RPC) {
790 (void) fprintf(stderr, "%s: READ %s %d bytes offset %d\n",
791 sfs_Myname, Cur_filename, reqp->count, reqp->offset);
792 (void) fflush(stderr);
795 * In updating attributes we may get replies out
796 * of order. We blindly update the attributes
797 * which may cause old attributes to be stored.
798 * XXX We should check for old attributes.
800 if (nfs_version == NFS_VERSION) {
801 Cur_file_ptr->attributes2 =
802 reply2.readres_u.reply.attributes;
803 /* capture count and offset for possible dump */
804 Dump_count = reply2.readres_u.reply.data.data_len;
806 if (nfs_version == NFS_V3) {
807 Cur_file_ptr->attributes3 =
808 reply3.res_u.ok.file_attributes.attr;
809 /* capture count and offset for possible dump */
810 Dump_count = reply3.res_u.ok.data.data_len;
813 Dump_offset = reqp->offset;
/* Record latency and success, then free the request slot. */
814 sfs_elapsedtime(op_ptr, &reqp->start, &reqp->stop);
815 op_ptr->results.good_calls++;
816 Ops[TOTAL].results.good_calls++;
818 reqp->in_use = FALSE;
/* Reply arrived but the NFS status was an error: count it bad. */
821 op_ptr->results.bad_calls++;
822 Ops[TOTAL].results.bad_calls++;
823 reqp->in_use = FALSE;
826 if (DEBUG_CHILD_BIOD) {
827 (void) fprintf(stderr,
828 "[%d]:BIOD READ FAILED: xid %x",
831 if (nfs_version == NFS_VERSION)
832 (void) fprintf(stderr, " status %d",
834 if (nfs_version == NFS_V3)
835 (void) fprintf(stderr, " status %d",
837 (void) fprintf(stderr, "\n");
842 } while (error > 0 && num_out_reqs > 0);
844 /* Scan for replies that have timed out */
845 if (num_out_reqs > 0) {
846 sfs_gettime(&curr_time);
847 for (reqp = biod_reqp, i = 0; i < max_out_reads; i++, reqp++) {
848 if (reqp->in_use == FALSE) {
/* reqp->timeout < curr_time: the call expired with no reply. */
851 if (reqp->timeout.sec < curr_time.sec ||
852 (reqp->timeout.sec == curr_time.sec &&
853 reqp->timeout.usec < curr_time.usec)) {
855 op_ptr->results.bad_calls++;
856 Ops[TOTAL].results.bad_calls++;
857 reqp->in_use = FALSE;
859 if (DEBUG_CHILD_BIOD) {
860 (void) fprintf (stderr,
861 "[%d]:Biod read timed out %x (%d.%06d) now (%d.%06d)\n",
863 reqp->timeout.sec, reqp->timeout.usec,
864 curr_time.sec, curr_time.usec);
865 if (biod_poll_wait(NFS_client, 0) > 0) {
866 (void) fprintf(stderr,
867 "[%d]:BIOD READ TIMEOUT - data on input queue!\n", calls);
875 * We go to sleep waiting for a reply if all the requests have
876 * been sent and there are outstanding requests, or we cannot
877 * send any more requests.
879 if ((xfer_size <= 0 && num_out_reqs > 0) ||
880 num_out_reqs == max_out_reads) {
882 * Find the next outstanding request that will timeout
883 * and take a time differential to use for the poll timeout.
884 * If the differential is less than zero, then we go to the
885 * top of the loop. Note that we are not picky on errors
886 * returned by select, after the sleep we return to the top
887 * of the loop so extensive error/status checking is not
892 for (reqp = biod_reqp, i = 0; i < max_out_reads; i++, reqp++) {
893 if (reqp->in_use == FALSE) {
/* Track the earliest pending timeout (tmp_time.sec==0 = unset). */
896 if (tmp_time.sec == 0 ||
897 (reqp->timeout.sec < tmp_time.sec ||
898 (reqp->timeout.sec == tmp_time.sec &&
899 reqp->timeout.usec < tmp_time.usec))) {
901 tmp_time = reqp->timeout;
904 if (tmp_time.sec == 0 && tmp_time.usec == 0)
906 sfs_gettime(&curr_time);
907 SUBTIME(tmp_time, curr_time);
/* Sleep (in the poll) until the earliest request would expire. */
908 (void) biod_poll_wait(NFS_client,
909 tmp_time.sec * 1000000 + tmp_time.usec);
911 } /* while not done */
920 * ---------------------- Async RPC Support Routines ----------------------
926 * Returns XID indicating success, 0 indicating failure.
/*
 * biod_clnt_call: fire an RPC asynchronously. A zero timeout makes
 * clnt_call() send the request and return immediately; the expected
 * "success" result is therefore RPC_TIMEDOUT — anything else is a
 * genuine send error and is reported via clnt_perror().
 */
930 CLIENT *clnt_handlep,
935 struct timeval timeout;
939 * Set timeouts to be zero to force message passing semantics.
944 if ((clnt_call(clnt_handlep, proc, xargs, argsp, NULL,
945 &xid, timeout)) != RPC_TIMEDOUT) {
946 clnt_perror(clnt_handlep, "biod_clnt_call failed");
951 } /* biod_clnt_call */
957 * Returns pointer to the biod_req struct entry that a reply was received
958 * for. Returns NULL if an error was detected.
960 * 1) This routine should only be called when it is known that there is
961 * data waiting on the socket.
963 static struct biod_req *
965 CLIENT *clnt_handlep,
/* NOTE(review): xids[] is sized MAX_BIODS; presumably
 * max_biod_reqs <= MAX_BIODS — verify, or cnt++ below can overrun. */
974 uint32_t xids[MAX_BIODS];
977 * Load list of valid outstanding xids
979 for (i = 0; i < max_biod_reqs; i++) {
980 if (biod_reqp[i].in_use == TRUE)
981 xids[cnt++] = biod_reqp[i].xid;
/* clnt_getreply() decodes the next reply whose XID is in xids[],
 * writing the matched XID into 'xid'. */
987 if ((res = clnt_getreply(clnt_handlep, xresults,
988 resultsp, cnt, xids, &xid, tv)) != RPC_SUCCESS) {
989 if (DEBUG_CHILD_BIOD) {
990 if (res == RPC_CANTDECODERES) {
991 (void) fprintf(stderr, "No xid matched, found %x\n",
999 * Scan to find XID matched in the outstanding request queue.
1001 for (i = 0; i < max_biod_reqs; i++) {
1002 if (biod_reqp[i].in_use == TRUE && biod_reqp[i].xid == xid) {
/* Stamp the reply arrival time; caller computes the latency. */
1003 sfs_gettime(&(biod_reqp[i].stop));
1004 return (&biod_reqp[i]);
/* No in-use slot matched the reply's XID: treat as an error. */
1008 return ((struct biod_req *)0);
1009 } /* biod_get_reply */
1014 * Returns -1 on error, 0 for no data available, > 0 to indicate data available
/*
 * biod_poll_wait: thin wrapper over clnt_poll() — wait up to 'usecs'
 * microseconds for reply data on the client's socket (0 = pure poll).
 */
1018 CLIENT *clnt_handlep,
1021 return (clnt_poll(clnt_handlep, usecs));
1022 } /* biod_poll_wait */