2 static char sfs_c_chdSid[] = "@(#)sfs_c_chd.c 2.1 97/10/23";
6 * Copyright (c) 1992-1997,2001 by Standard Performance Evaluation Corporation
8 * Standard Performance Evaluation Corporation (SPEC)
9 * 6585 Merchant Place, Suite 100
12 * This product contains benchmarks acquired from several sources who
13 * understand and agree with SPEC's goal of creating fair and objective
14 * benchmarks to measure computer performance.
16 * This copyright notice is placed here only to protect SPEC in the
17 * event the source is misused in any manner that is contrary to the
18 * spirit, the goals and the intent of SPEC.
20 * The source code is provided to the user or company under the license
21 * agreement for the SPEC Benchmark Suite for this product.
24 /*****************************************************************
26 * Copyright 1991,1992 Legato Systems, Inc. *
27 * Copyright 1991,1992 Auspex Systems, Inc. *
28 * Copyright 1991,1992 Data General Corporation *
29 * Copyright 1991,1992 Digital Equipment Corporation *
30 * Copyright 1991,1992 Interphase Corporation *
31 * Copyright 1991,1992 Sun Microsystems, Inc. *
33 *****************************************************************/
36 * -------------------------- sfs_c_chd.c -------------------------
38 * The sfs child. Routines to initialize child parameters,
39 * initialize test directories, and generate load.
42 * void child(int, float, int, char *);
43 * void init_fileinfo(void);
44 * void init_counters(void);
45 * sfs_fh_type * randfh(int, int, uint_t, sfs_state_type,
47 * int check_access(struct stat *)
48 * int check_fh_access();
51 * void check_call_rate(void);
52 * void init_targets(void);
53 * void init_dirlayout(void);
54 * void init_rpc(void);
55 * void init_testdir(void);
60 * 21-Aug-92 Wittle randfh() uses working set files array.
61 * init_fileinfo() sets up working set.
62 * 02-Jul-92 Teelucksingh Target file size now based on peak load
64 * 04-Jan-92 Pawlowski Added raw data dump hooks.
65 * 16-Dec-91 Wittle Created.
70 * ------------------------- Include Files -------------------------
83 #include <sys/types.h>
88 #include "sfs_c_def.h"
89 #include "sfs_m_def.h"
91 extern struct hostent *Server_hostent;
93 #define PROB_SCALE 1000L
94 #define _M_MODULUS 2147483647L /* (2**31)-1 */
96 #define _GROUP_DIVISOR 500
97 #define _FILES_PER_GROUP 4
98 #define _MIN_GROUPS 12
99 #define _WORKING_SET_AT_25_OPS_PER_SEC 975
102 * ----------------------- External Definitions -----------------------
105 /* forward definitions for local functions */
106 static void check_call_rate(void);
107 static void init_targets(void);
108 static int init_rpc(void);
109 static void init_testdir(void);
110 static int do_op(void);
112 static void init_dirlayout(void);
116 * ------------------- File Set Size Control -------------------------
118 static uint_t Setattr_borrowed = 0; /* setattr op used for file truncate */
119 static uint_t Create_borrowed = 0; /* create op used for file truncate */
122 * ------------- Per Child Load Generation Rate Variables -----------
124 static float Child_call_load; /* per child call/sec rate */
125 static float Child_req_load; /* per child req/sec rate */
126 static uint_t Calls_this_period; /* calls made during the current run period */
127 static uint_t Calls_this_test; /* calls made during the test so far */
128 static uint_t Reqs_this_period; /* reqs made during the current run period */
129 static uint_t Reqs_this_test; /* reqs made during the test so far */
130 static uint_t Sleep_msec_this_test; /* msec slept during the test so far */
131 static uint_t Sleep_msec_this_period;
132 static uint_t Previous_chkpnt_msec; /* beginning time of current run period */
133 static int Target_sleep_mspc; /* targeted sleep time per call */
134 static int Measurement_in_progress = 0;
136 static sfs_work_set_type Dir_working_set;
137 static sfs_work_set_type Io_working_set;
138 static sfs_work_set_type Non_io_working_set;
139 static sfs_work_set_type Symlink_working_set;
141 static uint_t Files_created = 0; /* unique integer part of file names */
142 static char io_buf[BUFSIZ];
144 (_WORKING_SET_AT_25_OPS_PER_SEC/_GROUP_DIVISOR) * _MIN_GROUPS;
146 * ------------------------- SFS Child -------------------------
151 * Child number 'child_num'. Initialize internal data structure and
152 * the test directory, then notify parent (through log file) that we
153 * are ready to start generating 'load' calls per second into the current
154 * working directory, or optionally, into the directories specified by
155 * 'argc' and 'argv'. Wait for the start signal, and then generate load
156 * until we complete our goal for calls or until the run time expires,
157 * depending on the 'Timed_run' flag. The run time expires when the parent
158 * sends the stop signal.
168 char namebuf[NFS_MAXNAMLEN]; /* unique name for this program */
170 int i; /* general use */
171 int op_count; /* ops completed during each request */
172 uint_t rand_sleep_msec; /* random sleep msec between calls */
173 uint_t current_msec; /* current test time in msecs */
174 double previous_pcnt;
175 CLIENT * mount_client_ptr; /* Mount client handle */
176 char * mount_point; /* Mount point for remote FS */
180 struct ladtime elapsed_time; /* Current_time - Start_time */
181 sfs_results_report_type report; /* final results log */
183 (void) setvbuf(stderr, io_buf, _IOLBF, BUFSIZ);
185 /* Change my name for error logging */
186 if ((nameptr = strrchr(sfs_Myname, '/')) != NULL)
187 sfs_Myname = ++nameptr;
188 (void) sprintf(namebuf, "%s%d", sfs_Myname, child_num);
189 sfs_Myname = namebuf;
190 Child_call_load = load;
191 Current_test_phase = Mount_phase;
193 /* Seed the random number generator based on my child number */
196 * Note: If random seeds are allocated by the prime client
197 * then this code must change.
199 sfs_srandom((int)(load + Child_num + 1));
201 /* Setup user and group information */
205 * Initialize call and request targets.
206 * Calls are the Over-The-Wire (OTW) operations that occur due to
207 * each request. A request may cause one or more calls.
208 * Initialize the child file info and mount the remote test directory.
209 * Set up the rpc and biod structures.
217 * If the mount point list is equal to the number of procs (P), the
218 * mount point for child M is the M'th entry in the list.
219 * If the mount point list is greater than the number of procs (P), the
220 * mount point for client N child M is ((N - 1) * P) + M
222 if (argc == children)
223 mnt_argc = Child_num;
225 mnt_argc = (Client_num - 1) * children + Child_num;
227 if (mnt_argc >= argc) {
228 (void) fprintf(stderr,
229 "%s: Invalid mount point list: required %d only specified %d mount points\n",
230 sfs_Myname, mnt_argc + 1, argc);
231 (void) generic_kill(0, SIGINT);
235 mount_point = argv[mnt_argc];
238 * May require root priv to perform bindresvport operation
240 mount_client_ptr = lad_getmnt_hand(mount_point);
241 if (mount_client_ptr == NULL) {
246 * should be all done doing priv port stuff
249 if (init_rpc() == -1) {
250 (void) fprintf(stderr, "%s: rpc initialization failed\n", sfs_Myname);
251 (void) generic_kill(0, SIGINT);
256 * finish all priv bindresvport calls
259 if (setuid(Real_uid) != (uid_t)0) {
260 (void) fprintf(stderr,"%s: %s%s", sfs_Myname,
261 "cannot perform setuid operation.\n",
262 "Do `make install` as root.\n");
265 init_mount_point(Child_num, mount_point, mount_client_ptr);
268 * Cleanup client handle for mount point
270 clnt_destroy(mount_client_ptr);
273 * Tell parent I'm ready to initialize my test directory,
274 * wait for the go ahead signal.
276 if (write(Log_fd, "x", 1) != 1) {
277 (void) fprintf(stderr, "%s: can't write to synchronization file %s",
278 sfs_Myname, Logname);
279 (void) generic_kill(0, SIGINT);
284 if (DEBUG_CHILD_GENERAL) {
287 (void) fprintf(stderr,
288 "Child %d loading at %3.2f calls/sec (%3.2f reqs/sec) for %d seconds\n",
289 Child_num, Child_call_load, Child_req_load,
290 Runtime - MULTICLIENT_OFFSET);
293 (void) fprintf(stderr,
294 "Child %d loading at %3.2f calls/sec (%3.2f reqs/sec) for %d seconds\n",
295 Child_num, Child_call_load, Child_req_load, Runtime);
298 (void) fprintf(stderr,
299 "Child %d loading at %3.2f calls/sec (%3.2f reqs/sec) for %d calls\n",
300 Child_num, Child_call_load, Child_req_load,
301 Ops[TOTAL].target_calls);
303 (void) fflush(stderr);
306 /* Initialize the test directory */
307 Current_test_phase = Populate_phase;
311 * activate the biod behaviour if desired
313 if (Biod_max_outstanding_reads > 0 || Biod_max_outstanding_writes > 0) {
318 * Tell parent I'm ready to start test, wait for the go ahead signal.
320 if (write(Log_fd, "x", 1) != 1) {
321 (void) fprintf(stderr, "%s: can't write to synchronization file %s\n",
322 sfs_Myname, Logname);
323 (void) generic_kill(0, SIGINT);
328 if (DEBUG_CHILD_GENERAL) {
331 (void) fprintf(stderr,
332 "Child %d loading at %3.2f calls/sec (%3.2f reqs/sec) for %d seconds\n",
333 Child_num, Child_call_load, Child_req_load,
334 Runtime - MULTICLIENT_OFFSET);
337 (void) fprintf(stderr,
338 "Child %d loading at %3.2f calls/sec (%3.2f reqs/sec) for %d seconds\n",
339 Child_num, Child_call_load, Child_req_load, Runtime);
342 (void) fprintf(stderr,
343 "Child %d loading at %3.2f calls/sec (%3.2f reqs/sec) for %d calls\n",
344 Child_num, Child_call_load, Child_req_load,
345 Ops[TOTAL].target_calls);
347 (void) fflush(stderr);
351 /* Start the warmup phase; initialize operation counters */
352 Current_test_phase = Warmup_phase;
354 Measurement_in_progress = 0;
357 * Compute the average sleep time per call.
358 * Start off with the assumption that we can sleep half the time.
359 * Note: using msec-per-call to adjust sleeping time
360 * limits benchmark load rates to less than 1000 calls-per-sec-per-child.
362 Target_sleep_mspc = (int) (((1000.0 / Child_call_load) / 2.0) + .5);
365 * Occasionally, check to see if ops are being generated at the
366 * correct rate. During the warmup phase, checks are made every 2 seconds.
367 * Hopefully, this will allow the test to reach steady state before the
368 * warmup phase ends. During the timed test run, checks are made every
369 * 10 seconds. The switch is made when we receive the start signal.
371 Msec_per_period = DEFAULT_WARM_RATE_CHECK * 1000;
373 /* Loop generating load */
374 while ((Timed_run && Runtime) ||
376 (Ops[TOTAL].results.good_calls < Ops[TOTAL].target_calls))) {
378 if (start_run_phase) {
380 Measurement_in_progress = 1;
382 * Progress is checked every 10 seconds during the test run.
384 Msec_per_period = DEFAULT_RUN_RATE_CHECK * 1000;
389 /* Do an NFS operation, unless we need to sleep for the whole period. */
390 if (Target_sleep_mspc < Msec_per_period)
395 /* if the call was successful, add op_count to the period total. */
397 Calls_this_period += op_count;
402 * If the call was successful,
403 * or we need to sleep for the whole period,
404 * sleep for a while before doing the next op.
406 if ((op_count > 0) || (Target_sleep_mspc >= Msec_per_period)) {
408 * Sleep for the whole period or
409 * for a random (positive) time period in the range
410 * (Target_sleep_mspc +- 1/2(Target_sleep_mspc)).
412 if (Target_sleep_mspc >= Msec_per_period)
413 rand_sleep_msec = Msec_per_period;
414 else if (Target_sleep_mspc >= 1)
415 rand_sleep_msec = (Target_sleep_mspc >> 1)
416 + (sfs_random() % Target_sleep_mspc);
420 if (rand_sleep_msec != 0) {
421 if (DEBUG_CHILD_TIMING) {
422 (void) fprintf(stderr, "Child %d sleep for %d msec\n",
423 Child_num, rand_sleep_msec);
424 (void) fflush(stderr);
426 Sleep_msec_this_period += msec_sleep(rand_sleep_msec);
431 * See if it's time to check our progress.
432 * If an operation was just performed, then Cur_time was updated
433 * in the op routine; otherwise we need to get Cur_time.
436 sfs_gettime(&Cur_time);
439 current_msec = (Cur_time.sec * 1000) + (Cur_time.usec / 1000);
440 if (DEBUG_CHILD_XPOINT) {
441 (void) fprintf(stderr, "cur=%d prev=%d per=%d\n",
442 current_msec, Previous_chkpnt_msec, Msec_per_period);
445 if ((current_msec - Previous_chkpnt_msec) > Msec_per_period) {
449 } /* end while more calls to make */
452 * We are done generating our part of the load.
453 * Store total time in last slot of counts array.
455 * The last slot has the wall clock time of all the load generation.
456 * Individual slots have the wall clock time spent just for the op
459 sfs_gettime(&Cur_time);
460 Measurement_in_progress = 0;
461 elapsed_time.sec = Cur_time.sec;
462 elapsed_time.usec = Cur_time.usec;
463 SUBTIME(elapsed_time, Starttime);
465 Ops[TOTAL].results.time.sec = elapsed_time.sec;
466 Ops[TOTAL].results.time.usec = elapsed_time.usec;
468 if (DEBUG_CHILD_FILES) {
469 (void) fprintf(stderr,
470 "%s: max fss %d KB min fss %d KB\n",
471 sfs_Myname, Most_fss_bytes, Least_fss_bytes);
472 (void) fflush(stderr);
475 if (DEBUG_CHILD_FILES) {
476 (void) fprintf(stderr, "Child %d Files:\n", Child_num);
477 for (i = 0; i < Num_io_files; i++)
478 (void) fprintf(stderr, "Io[%d] use %d xfer %d\n",
479 i, Io_files[i].use_cnt, Io_files[i].xfer_cnt);
480 for (i = 0; i < Num_non_io_files; i++)
481 (void) fprintf(stderr, "Non_io[%d] use %d xfer %d\n",
482 i, Non_io_files[i].use_cnt,
483 Non_io_files[i].xfer_cnt);
484 for (i = 0; i < Num_dir_files; i++)
485 (void) fprintf(stderr, "Dir[%d] use %d xfer %d\n",
486 i, Dirs[i].use_cnt, Dirs[i].xfer_cnt);
487 for (i = 0; i < Num_symlink_files; i++)
488 (void) fprintf(stderr, "Sym[%d] use %d xfer %d\n",
489 i, Symlinks[i].use_cnt, Symlinks[i].xfer_cnt);
490 (void) fflush(stderr);
493 if (DEBUG_CHILD_SETUP) {
494 int j, group_size, offset, index, tot;
495 for (i = 0; i < Io_working_set.access_group_cnt; i++) {
496 group_size = Io_working_set.access_group_size;
497 if (i < (Num_working_io_files -
498 ((Num_working_io_files / Io_working_set.access_group_cnt)
499 * Io_working_set.access_group_cnt)))
502 for (j = 0; j < group_size; j++) {
503 offset = i + (j * Io_working_set.access_group_cnt);
504 index = Io_working_set.entries[offset].index;
505 tot += Io_files[index].use_cnt;
506 (void) fprintf(stderr, "Working[%d] use %d xfer %d\n",
507 offset, Io_files[index].use_cnt,
508 Io_files[index].xfer_cnt);
510 (void) fprintf(stderr, "Group %d total use %d\n", i, tot);
512 (void) fflush(stderr);
515 if (DEBUG_CHILD_GENERAL) {
516 (void) fprintf(stderr, "Child %d Ops:\n", Child_num);
519 (void) fprintf(stderr,
521 (void) fprintf(stderr,
522 " trgt actl trgt actl bad no trgt actl trgt actl\n");
523 (void) fprintf(stderr,
524 " name mix mix cnt cnt cnt cnt mix mix cnt cnt\n");
526 for (i = 0; i < NOPS + 1; i++) {
527 (void) fprintf(stderr,
528 "%11s %4d %4.1f %5d %5d %4d %3d %4.1f %4.1f %6d %6d\n",
529 Ops[i].name, Ops[i].mix_pcnt,
530 (float) (100 * Ops[i].results.good_calls)
531 / (float) Ops[TOTAL].results.good_calls,
532 Ops[i].target_calls, Ops[i].results.good_calls,
533 Ops[i].results.bad_calls, Ops[i].no_calls,
534 Ops[i].req_pcnt - previous_pcnt,
535 (float) (100 * Ops[i].req_cnt) / (float) Ops[TOTAL].req_cnt,
536 Ops[i].target_reqs, Ops[i].req_cnt);
537 previous_pcnt = Ops[i].req_pcnt;
539 (void) fflush(stderr);
542 if (DEBUG_CHILD_GENERAL) {
543 (void) fprintf(stderr, "Child %d made %d of %d calls in %ld sec\n",
544 Child_num, Ops[TOTAL].results.good_calls,
545 Ops[TOTAL].target_calls,
546 Ops[TOTAL].results.time.sec);
547 (void) fflush(stderr);
550 clnt_destroy(NFS_client);
553 /* write stats to log file (append mode) */
554 report.version = nfs_version;
555 for (i = 0; i < NOPS + 1; i++) {
556 report.results_buf[i] = Ops[i].results;
558 report.total_fss_bytes = Total_fss_bytes;
559 report.least_fss_bytes = Least_fss_bytes;
560 report.most_fss_bytes = Most_fss_bytes;
561 report.base_fss_bytes = Base_fss_bytes;
563 if (write(Log_fd, (char *) &report, sizeof(report)) == -1) {
565 (void) fprintf(stderr, "%s: can't write to synchronization file %s ",
566 sfs_Myname, Logname);
569 (void) generic_kill(0, SIGINT);
572 (void) close(Log_fd);
574 print_dump(Client_num, Child_num);
580 * -------------------- Call Target Initialization --------------------
584 * Initialize call and request targets.
589 int call_target; /* total number of calls to make */
590 int req_target; /* total number of reqs to make */
591 int32_t equal_mix; /* equal mix of operations */
592 int32_t slack; /* calls leftover after % mix */
593 int randnum; /* a random number */
594 int32_t i; /* general use */
595 double total_req_pcnt;
596 double previous_pcnt;
601 * Compute number of target calls for each operation.
602 * These are used to estimate the number of filehandles
603 * that will be used for each type of operation.
605 call_target = Ops[TOTAL].target_calls;
606 Ops[TOTAL].target_calls = 0;
608 for (i = 0; i < NOPS; i++) {
609 Ops[i].target_calls = (Ops[i].mix_pcnt * call_target) / 100;
610 Ops[TOTAL].target_calls += Ops[i].target_calls;
611 if (Ops[i].mix_pcnt != 0)
615 /* Put left over calls into the heavier mix operations. */
616 slack = call_target - Ops[TOTAL].target_calls;
617 equal_mix = (100 / nops_used) / 2;
619 randnum = sfs_random() % NOPS;
620 if (Ops[randnum].mix_pcnt != 0 && Ops[randnum].mix_pcnt >= equal_mix) {
621 Ops[randnum].target_calls++;
622 Ops[TOTAL].target_calls++;
628 * compute request targets (based on suggestions from M. Molloy, HP)
631 /* compute total of target requests, based on weighted ops */
632 total_req_pcnt = 0.0;
633 for (i = 0; i < NOPS ; i++) {
636 total_req_pcnt += ((double) Ops[i].mix_pcnt)
637 / Io_dist_ptr->avg_ops_per_read_req;
640 total_req_pcnt += ((double) Ops[i].mix_pcnt)
641 / Io_dist_ptr->avg_ops_per_write_req;
643 case COMMIT: /* Commits never generate requests */
646 total_req_pcnt += (double) Ops[i].mix_pcnt;
652 * compute cumulative frequency distribution percentile for each op.
653 * This code assumes that the NULLCALL does not generate multiple
654 * OTW operations per request.
657 for (i = 0; i < NOPS; i++) {
660 Ops[i].req_pcnt = previous_pcnt +
661 (((100.0 * (double) Ops[i].mix_pcnt)
662 / Io_dist_ptr->avg_ops_per_read_req)
666 Ops[i].req_pcnt = previous_pcnt +
667 (((100.0 * (double) Ops[i].mix_pcnt)
668 / Io_dist_ptr->avg_ops_per_write_req)
671 case COMMIT: /* Commits never generate requests */
672 Ops[i].req_pcnt = previous_pcnt;
675 Ops[i].req_pcnt = previous_pcnt +
676 ((100.0 * (double) Ops[i].mix_pcnt)
680 previous_pcnt = Ops[i].req_pcnt;
682 /* force last bucket to 100 */
683 Ops[NOPS-1].req_pcnt = 100;
685 /* compute the req load rate */
686 Child_req_load = (total_req_pcnt * Child_call_load) / 100.0;
689 * Compute number of target reqs for each operation.
690 * These are used for debugging purposes.
692 req_target = (total_req_pcnt * Ops[TOTAL].target_calls) / 100;
693 Ops[TOTAL].target_reqs = 0;
696 for (i = 0; i < NOPS; i++) {
697 Ops[i].target_reqs = 0;
698 if (Ops[i].mix_pcnt != 0) {
699 Ops[i].target_reqs = ((Ops[i].req_pcnt - previous_pcnt) *
702 Ops[TOTAL].target_reqs += Ops[i].target_reqs;
703 previous_pcnt = Ops[i].req_pcnt;
706 /* Put left over reqs into the heavier mix operations. */
707 slack = req_target - Ops[TOTAL].target_reqs;
708 equal_mix = (100 / nops_used) / 2;
710 randnum = sfs_random() % NOPS;
711 if (Ops[randnum].target_reqs != 0 &&
712 Ops[randnum].req_pcnt >= equal_mix) {
713 Ops[randnum].target_reqs++;
714 Ops[TOTAL].target_reqs++;
718 if (DEBUG_CHILD_GENERAL) {
719 (void) fprintf(stderr,
720 " Op\t Op mix\tCalls\t\t Req mix\t Reqs\t\n");
722 for (i = 0; i < NOPS; i++) {
723 (void) fprintf(stderr, "%8s\t%8d\t%5d\t\t%8.2f\t%5d\n",
725 Ops[i].mix_pcnt, Ops[i].target_calls,
726 Ops[i].req_pcnt - previous_pcnt,
728 previous_pcnt = Ops[i].req_pcnt;
735 * ----------------------- File Set Initialization -----------------------
738 static file_array_initialized = 0;
739 static int file_size_array[100];
742 * For a value between 0-99, return a size based on distribution
750 if (file_array_initialized == 0) {
753 for (j = 0, k = 0; j < 100; j++) {
754 if (j >= Default_file_size_dist[k].pcnt &&
755 Default_file_size_dist[k + 1].size != 0)
757 file_size_array[j] = Default_file_size_dist[k].size * 1024;
759 file_array_initialized++;
761 return (file_size_array[i]);
765 * allocate and initialize the various file information structures.
772 int group_size, group_cnt;
773 int range, previous_range;
776 double e_to_the_lambda;
777 double cumulative_ratio;
778 int num_non_io_to_init;
780 int files_per_generation;
781 sfs_fh_data *fh_datap;
785 * Zero number of files created used to create unique names
790 * Dirs - Initialize the files info structure.
791 * Directories must come first, in initializing test dirs we
792 * need to make sure that any files deleted are not full directories
795 Num_dirs + /* exist: readdir, rmdir */
796 Ops[MKDIR].target_calls + /* non-exist: mkdir */
797 Ops[RMDIR].target_calls; /* empty dir to be removed */
798 if (DEBUG_CHILD_SETUP) {
799 (void) fprintf(stderr, "%s: allocate %d directories\n",
800 sfs_Myname, Num_dir_files);
801 (void) fflush(stderr);
803 Dirs = (sfs_fh_type *) calloc(Num_dir_files, sizeof(sfs_fh_type));
805 if (Dirs == (sfs_fh_type *) 0) {
806 (void) fprintf(stderr,"%s: init_fileinfo dir calloc %d bytes failed",
807 sfs_Myname, Num_dir_files * sizeof(sfs_fh_type));
808 (void) generic_kill(0, SIGINT);
811 for (i = 0; i < Num_dir_files; i++) {
812 Dirs[i].working_set = 0;
813 Dirs[i].state = Nonexistent;
814 if (i <= (Num_dirs + Ops[RMDIR].target_calls)) {
815 Dirs[i].initialize = 1;
816 Dirs[i].fh_data = (sfs_fh_data *)0;
818 Dirs[i].unique_num = i;
821 /* Working Set Directory Files - Initialize the working files array. */
822 Num_working_dirs = Num_dir_files;
823 Dir_working_set.entries = (sfs_work_fh_type *)
824 calloc(Num_working_dirs,
825 sizeof(sfs_work_fh_type));
826 if (Dir_working_set.entries == (sfs_work_fh_type *) 0) {
827 (void) fprintf(stderr,"%s: init_fileinfo wdir calloc %d bytes failed",
828 sfs_Myname, Num_working_dirs * sizeof(sfs_work_fh_type));
829 (void) generic_kill(0, SIGINT);
834 * Dirs are accessed uniformly. See Non_io_files for a description.
836 if (init_rand_range(Num_dir_files)) {
837 (void) fprintf(stderr, "%s: init_fileinfo dir init_rand_range failed",
839 (void) generic_kill(0, SIGINT);
843 for (i = 0; i < Num_working_dirs; i++) {
844 if (Num_working_dirs != Num_dir_files) {
845 /* generate a random subset */
846 index = rand_range(i);
848 /* match the working set one-to-one with the files */
852 Dirs[index].working_set = 1;
853 Dir_working_set.entries[i].index = index;
854 Dir_working_set.entries[i].range = i + 1;
856 Dir_working_set.access_group_size = Num_working_dirs;
857 Dir_working_set.access_group_cnt = 1;
859 Dir_working_set.max_range = Num_working_dirs;
861 if (DEBUG_CHILD_SETUP) {
862 (void) fprintf(stderr, "\nDir size=%d cnt=%d max=%d\n",
863 Dir_working_set.access_group_size,
864 Dir_working_set.access_group_cnt,
865 Dir_working_set.max_range);
866 (void) fflush(stderr);
871 * I/o Files - Initialize the files info structure to Num_io_files.
873 if (DEBUG_CHILD_SETUP) {
874 (void) fprintf(stderr, "%s: allocate %d i/o files, %d working\n",
875 sfs_Myname, Num_io_files, Num_working_io_files);
876 (void) fflush(stderr);
879 Io_files = (sfs_fh_type *) calloc(Num_io_files, sizeof(sfs_fh_type));
880 if (Io_files == (sfs_fh_type *) 0) {
881 (void) fprintf(stderr,"%s: init_fileinfo %d io files calloc %d bytes failed",
882 sfs_Myname, Num_io_files,
883 Num_io_files * sizeof(sfs_fh_type));
884 (void) generic_kill(0, SIGINT);
888 for (i = 0; i < Num_io_files; i++) {
889 Io_files[i].working_set = 0;
890 Io_files[i].state = Nonexistent;
891 Io_files[i].initialize = 1;
892 Io_files[i].size = get_file_size(io_file_num % 100);
893 Io_files[i].unique_num = Files_created++;
894 /* Memory allocation for the fh_data will be done later. */
895 Io_files[i].fh_data = (sfs_fh_data *)0;
900 * Working Set I/o Files - Initialize the working files array.
901 * Only Access_percent of the Io_files are put into the working set.
903 Io_working_set.entries = (sfs_work_fh_type *)
904 calloc(Num_working_io_files,
905 sizeof(sfs_work_fh_type));
906 if (Io_working_set.entries == (sfs_work_fh_type *) 0) {
907 (void) fprintf(stderr,"%s: init_fileinfo wio calloc %d bytes failed",
908 sfs_Myname, Num_working_io_files * sizeof(sfs_work_fh_type));
909 (void) generic_kill(0, SIGINT);
914 if (DEBUG_CHILD_FILES) {
915 (void) fprintf(stderr, "working_set: ");
916 (void) fflush(stderr);
920 * For now, the access distribution is poisson. See below.
922 /* #define UNIFORM_ACCESS */
923 #define POISSON_ACCESS
925 #ifdef UNIFORM_ACCESS
927 * With a uniform access distribution, there is no need for access
929 * Hopefully SPEC-SFS will agree on a non-uniform access function.
930 * (see below for an example using a poisson distribution).
932 if (init_rand_range(Num_io_files)) {
933 (void) fprintf(stderr, "%s: init_fileinfo io init_rand_range failed",
935 (void) generic_kill(0, SIGINT);
939 for (i = 0; i < Num_working_io_files; i++) {
940 if (Num_working_io_files != Num_io_files) {
941 /* generate a random subset */
942 index = rand_range(i);
944 /* match the working set one-to-one with the files */
947 Io_files[index].working_set = 1;
948 Io_working_set.entries[i].index = index;
949 Io_working_set.entries[i].range = i + 1;
951 if (DEBUG_CHILD_FILES) {
952 (void) fprintf(stderr, "%d,", index);
953 (void) fflush(stderr);
956 Io_working_set.access_group_size = Num_working_io_files;
957 Io_working_set.access_group_cnt = 1;
958 Io_working_set.max_range = Num_working_io_files;
960 if (DEBUG_CHILD_FILES) {
961 (void) fprintf(stderr, "\nIo size=%d cnt=%d max=%d\n",
962 Io_working_set.access_group_size,
963 Io_working_set.access_group_cnt,
964 Io_working_set.max_range);
965 (void) fflush(stderr);
968 #endif /* ! UNIFORM_ACCESS */
969 #ifdef POISSON_ACCESS
972 * The working set is partitioned into access groups of Access_group_size
973 * files. Each group is assigned a probability of being accessed.
974 * This is implemented as a cumulative distribution table, with
975 * variable probabilities for each group. The distribution function
976 * is used to generate a sequence of values, one for each group.
977 * Each group is assigned a 'range' value that is the sum of all
978 * previous range values, plus the next value in the distribution
979 * sequence. Thus, the probability of choosing any particular group
980 * is equal to the relative height of the distribution curve at the
981 * point represented by that group.
982 * The choice is made by generating a random number in the range
983 * 0 up to (the sum of all values in the distribution sequence - 1),
984 * and finding the group with the greatest range value less than
986 * Once a group is chosen, a random number in the range
987 * 1 - Access_group_size is used to pick an entry from within the group.
988 * The entry chosen points to a file in the Io_files array.
989 * If the file at Io_files[index] is eligible for the operation,
990 * then it is accessed, otherwise, the access group is searched
991 * sequentially (mod Access_group_size with wrap-around) until an
992 * eligible file is found.
993 * Access_group_size is derived so that there are enough files
994 * in each group to give a good chance of finding an eligible file
995 * for each operation, but so that there are enough groups (each
996 * representing a point on the distribution curve) to generate a
997 * fairly smooth access distribution curve.
1001 * group_cnt = 8 + ((Num_working_io_files/500) * 4);
1003 * The function is chosen to guarantee that each group contains
1004 * at least 1 file, and, beginning with a base of 8 groups, the
1005 * number of groups increases by 4 for each 500 files in the working
1006 * set. It was arrived at heuristically. The goal is to put enough
1007 * files into each group to ensure that a file with the right
1008 * attributes can be found once the group is selected (which can be
1009 * difficult for small working sets), while at the same time creating
1010 * enough groups to provide enough points on the distribution curve
1011 * to yield an interesting access distribution.
1013 * Since this function is being computed per child, the interesting range
1014 * of working set sizes is computed based on a range of per child load
1015 * values from 1 op/sec to 100 op/sec. Note that this assumes an
1016 * average server response time of at least 10 msec, which seems to be
1017 * a good minimum value for a wide range of servers given the default
1018 * mix of NFS operations.
1019 * Based on these load values, the total file set, based on the default
1020 * values of 10 MB/op and 38 files/MB, works out to 380 - 38000 files.
1021 * The default working set of 10% of these files yields a working
1022 * set size of 38 - 3800 files.
1025 files_per_generation = (_GROUP_DIVISOR * generations) / _FILES_PER_GROUP;
1026 Io_working_set.access_group_cnt = generations +
1027 ((Num_working_io_files/files_per_generation) * generations);
1029 * if the number of files in the working set is not a multiple of
1030 * the group size, then some groups will contain (group_size+1) files.
1031 * Thus, this is the base group size.
1033 Io_working_set.access_group_size = Num_working_io_files /
1034 Io_working_set.access_group_cnt;
1036 if (init_rand_range(Num_io_files)) {
1037 (void) fprintf(stderr, "%s: init_fileinfo io init_rand_range failed",
1039 (void) generic_kill(0, SIGINT);
1043 /* randomly set up working set of indices into Io_files */
1044 for (i = 0; i < Num_working_io_files; i++) {
1045 if (Num_working_io_files != Num_io_files) {
1046 /* generate a random subset */
1047 index = rand_range(i);
1049 /* match the working set one-to-one with the files */
1052 Io_files[index].working_set = 1;
1053 Io_working_set.entries[i].index = index;
1055 if (DEBUG_CHILD_FILES) {
1056 (void) fprintf(stderr, "%d,", index);
1057 (void) fflush(stderr);
1061 /* initialization for distribution function */
1063 lambda = (double) (generations / 2);
1064 if (lambda <= 0) lambda = 1;
1065 e_to_the_lambda = exp(lambda);
1066 cumulative_ratio = 1.0;
1068 if (DEBUG_CHILD_FILES) {
1069 (void) fprintf(stderr,
1070 "\ngrp_cnt %d lambda %6.0f e_to_the_lambda %6.2f\n",
1071 Io_working_set.access_group_cnt, lambda,
1073 (void) fflush(stderr);
1076 /* assign a range to each group */
1077 for (i = 0; i < Io_working_set.access_group_cnt; i++) {
1079 * get next value in poisson distribution sequence, using
1080 * lambda^x / (e^(lambda) * x!) , for x=1,2,3,...,group_cnt
1084 if( i % generations == 0)
1086 lambda = (double) (generations / 2);
1087 if (lambda <= 0) lambda = 1;
1088 e_to_the_lambda = exp(lambda);
1089 cumulative_ratio = 1.0;
1091 probability = cumulative_ratio/e_to_the_lambda;
1092 if (probability <= 0.0 || probability > 1.0) {
1093 (void) fprintf(stderr, "%s: access probability = %g while setting up Io_working_set, i=%d of %d\n",
1094 sfs_Myname, probability,
1095 i, Io_working_set.access_group_cnt);
1096 (void) generic_kill(0, SIGINT);
1100 /* convert probability to scaled integer */
1101 next_value = (int) (PROB_SCALE * probability);
1103 /* check for negative numbers */
1104 if (next_value <= 0) {
1105 (void) fprintf(stderr, "%s: next_value = %d while setting up Io_working_set, i=%d of %d\n",
1106 sfs_Myname, next_value,
1107 i, Io_working_set.access_group_cnt);
1108 (void) generic_kill(0, SIGINT);
1112 previous_range = range;
1113 range = previous_range + next_value;
1114 if (range <= previous_range || range < 0) {
1115 (void) fprintf(stderr, "%s: range = %d previous_range = %d while setting up Io_working_set, i=%d of %d\n",
1116 sfs_Myname, range, previous_range,
1117 i, Io_working_set.access_group_cnt);
1118 (void) generic_kill(0, SIGINT);
1122 /* assign range value to each file in this group */
1123 group_size = Io_working_set.access_group_size;
1124 group_cnt = Io_working_set.access_group_cnt;
1125 if (i < (Num_working_io_files -
1126 ((Num_working_io_files / group_cnt) * group_cnt)))
1128 for (j = 0; j < group_size; j++) {
1129 index = i + (j * Io_working_set.access_group_cnt);
1130 Io_working_set.entries[index].range = range;
1133 cumulative_ratio *= lambda / (double) ((i%generations)+1);
1135 if (DEBUG_CHILD_SETUP) {
1136 (void) fprintf(stderr, "group %d next %d range %d\n",
1137 i, next_value, range);
1138 (void) fflush(stderr);
1141 Io_working_set.max_range = range;
1143 if (DEBUG_CHILD_SETUP) {
1144 (void) fprintf(stderr, "\nIo size=%d cnt=%d max=%d\n",
1145 Io_working_set.access_group_size,
1146 Io_working_set.access_group_cnt,
1147 Io_working_set.max_range);
1148 (void) fflush(stderr);
1150 #endif /* POISSON_ACCESS */
1153 /* figure out how many files to allocate and initialize */
1155 /* initialize half the non-I/O files */
1156 /* NOTE: initializing half the non-i/o files works ok with the
1157 default op mix. If the mix is changed affecting the
1158 ratio of creations to removes, there may not be enough
1159 empty slots for file creation (or there may not be
1160 enough created during initialization to handle a lot of
1161 removes that occur early in the test run), and this would
1162 cause do_op() to fail to find a file appropriate for the
1163 chosen op. This will result in the global variable
1164 Ops[op].no_calls being incremented (turn on child level
1165 debugging to check this count), and the do_op() local
1166 variable aborted_ops to be incremented and checked during
1167 runtime for too many failures.
1169 num_non_io_to_init = Num_non_io_files * RATIO_NON_IO_INIT;
1171 if (DEBUG_CHILD_SETUP) {
1172 (void) fprintf(stderr, "%s: allocate %d non-i/o files\n",
1173 sfs_Myname, Num_non_io_files);
1174 (void) fflush(stderr);
1176 Non_io_files = (sfs_fh_type *)
1177 calloc(Num_non_io_files, sizeof(sfs_fh_type));
1178 if (Non_io_files == (sfs_fh_type *) 0) {
1179 (void) fprintf(stderr,"%s: init_fileinfo nio calloc %d bytes failed",
1180 sfs_Myname, Num_non_io_files * sizeof(sfs_fh_type));
1181 (void) generic_kill(0, SIGINT);
1184 for (i = 0; i < Num_non_io_files; i++) {
1185 Non_io_files[i].working_set = 0;
1186 Non_io_files[i].state = Nonexistent;
1187 if (i <= num_non_io_to_init)
1188 Non_io_files[i].initialize = 1;
1189 Non_io_files[i].size = get_file_size(io_file_num % 100);
1190 Non_io_files[i].unique_num = Files_created++;
1191 /* Allocation of fh_data will happen in init_testdir */
1192 Non_io_files[i].fh_data = (sfs_fh_data *)0;
1196 /* Working Set Non i/o Files - Initialize the working files array. */
1197 Num_working_non_io_files = Num_non_io_files;
1198 Non_io_working_set.entries = (sfs_work_fh_type *)
1199 calloc(Num_working_non_io_files,
1200 sizeof(sfs_work_fh_type));
1201 if (Non_io_working_set.entries == (sfs_work_fh_type *) 0) {
1202 (void) fprintf(stderr,"%s: init_fileinfo nwio calloc %d bytes failed",
1203 sfs_Myname, Num_working_io_files * sizeof(sfs_work_fh_type));
1204 (void) generic_kill(0, SIGINT);
1209 * Non_io_files are accessed uniformly. Each entry has a
1210 * 1/Num_working_non_io_files change of being accessed.
1211 * The choice is made by generating a random number in the range
1212 * 0 through (Num_working_non_io_files - 1) and finding the entry
1213 * with the greatest range value less than the random number.
1214 * If the file at Non_io_files[index] is eligible for the operation,
1215 * it is accessed, otherwise, the access group that the entry belongs
1216 * to is searched sequentially until an eligible file is found.
1217 * For non i/o files, all of the working set files are in the same
1218 * access group (since they access is uniform, this is ok, and
1219 * maximizes the chances of finding an eligible file).
1221 if (init_rand_range(Num_non_io_files)) {
1222 (void) fprintf(stderr, "%s: init_fileinfo non_io init_rand_range failed",
1224 (void) generic_kill(0, SIGINT);
1228 for (i = 0; i < Num_working_non_io_files; i++) {
1229 if (Num_working_non_io_files != Num_non_io_files) {
1230 /* generate a random subset */
1231 index = rand_range(i);
1233 /* match the working set one-to-one with the files */
1236 Non_io_files[index].working_set = 1;
1237 Non_io_working_set.entries[i].index = index;
1238 Non_io_working_set.entries[i].range = i + 1;
1240 Non_io_working_set.access_group_size = Num_working_non_io_files;
1241 Non_io_working_set.access_group_cnt = 1;
1242 Non_io_working_set.max_range = Num_working_non_io_files;
1244 if (DEBUG_CHILD_SETUP) {
1245 (void) fprintf(stderr, "\nNon_io size=%d cnt=%d max=%d\n",
1246 Non_io_working_set.access_group_size,
1247 Non_io_working_set.access_group_cnt,
1248 Non_io_working_set.max_range);
1249 (void) fflush(stderr);
1253 /* Symlinks - Initialize the files info structure. */
1255 Num_symlinks + /* exist: readlink */
1256 Ops[SYMLINK].target_calls; /* non-exist: symlink */
1257 if (DEBUG_CHILD_SETUP) {
1258 (void) fprintf(stderr, "%s: allocate %d symlinks\n",
1259 sfs_Myname, Num_symlink_files);
1260 (void) fflush(stderr);
1262 Symlinks = (sfs_fh_type *)
1263 calloc(Num_symlink_files, sizeof(sfs_fh_type));
1264 if (Symlinks == (sfs_fh_type *) 0) {
1265 (void) fprintf(stderr,"%s: init_fileinfo sym calloc %d bytes failed",
1266 sfs_Myname, (Num_symlink_files * sizeof(sfs_fh_type)));
1267 (void) generic_kill(0, SIGINT);
1270 for (i = 0; i < Num_symlink_files; i++) {
1271 Symlinks[i].working_set = 0;
1272 Symlinks[i].state = Nonexistent;
1273 if (i <= Num_symlinks)
1274 Symlinks[i].initialize = 1;
1275 Symlinks[i].fh_data = (sfs_fh_data *)0;
1276 Symlinks[i].unique_num = i;
1279 /* Working Set Symlinks - Initialize the working files array. */
1280 /* This appears to cause the following loop to be mostly dead */
1281 /* code. It is unclear why this line is here. One */
1282 /* possibility is that Num_symlink_files should be */
1283 /* Num_symlinks. XXX */
1284 Num_working_symlinks = Num_symlink_files;
1285 Symlink_working_set.entries = (sfs_work_fh_type *)
1286 calloc(Num_working_symlinks,
1287 sizeof(sfs_work_fh_type));
1288 if (Symlink_working_set.entries == (sfs_work_fh_type *) 0) {
1289 (void) fprintf(stderr,"%s: init_fileinfo wsym calloc %d bytes failed",
1290 sfs_Myname, Num_working_symlinks * sizeof(sfs_work_fh_type));
1291 (void) generic_kill(0, SIGINT);
1296 * Symlinks are accessed uniformly. See Non_io_files for a description.
1298 if (init_rand_range(Num_symlink_files)) {
1299 (void) fprintf(stderr, "%s: init_fileinfo sym init_rand_range failed",
1301 (void) generic_kill(0, SIGINT);
1305 for (i = 0; i < Num_working_symlinks; i++) {
1306 if (Num_working_symlinks != Num_symlink_files) {
1307 /* generate a random subset */
1308 index = rand_range(i);
1310 /* match the working set one-to-one with the files */
1314 Symlinks[index].working_set = 1;
1315 Symlink_working_set.entries[i].index = index;
1316 Symlink_working_set.entries[i].range = i + 1;
1318 Symlink_working_set.access_group_size = Num_working_symlinks;
1319 Symlink_working_set.access_group_cnt = 1;
1320 Symlink_working_set.max_range = Num_working_symlinks;
1322 if (DEBUG_CHILD_SETUP) {
1323 (void) fprintf(stderr, "\nSymlink size=%d cnt=%d max=%d\n",
1324 Symlink_working_set.access_group_size,
1325 Symlink_working_set.access_group_cnt,
1326 Symlink_working_set.max_range);
1327 (void) fflush(stderr);
1331 * Free up random number range
1333 (void)init_rand_range(0);
1336 } /* init_fileinfo */
1339 * allocate and initialize the directory layout of the files
1341 * We can only place files in directories that can't be removed
1344 init_dirlayout(void)
/*
 * init_dirlayout -- assign a parent directory to every pre-allocated
 * file slot (Dirs, Io_files, Non_io_files, Symlinks).
 * NOTE(review): this listing is an excerpt; the function header line,
 * local declarations (i, j) and several closing braces fall in gaps
 * of the extraction and are not visible here.
 */
1349 * Initially create directories only one level deep so all directories
1350 * must be in the parent directory.
1352 for (i = 0; i < Num_dir_files; i++) {
1353 Dirs[i].dir = &Export_dir;
1357 * Files must only be placed in the first Num_dirs entries leaving
1358 * a set for directory create and remove.
/* j indexes the containing directory; it advances once every
 * Files_per_dir i/o files (its initialization/increment lines are in
 * a gap of this excerpt -- confirm against the full source). */
1361 for (i = 0; i < Num_io_files; i++) {
1362 if (i != 0 && (i % Files_per_dir) == 0)
1364 Io_files[i].dir = &Dirs[j];
1368 * All non-io and symlink files are placed in the parent directory
1370 for (i = 0; i < Num_non_io_files; i++) {
1371 Non_io_files[i].dir = &Export_dir;
1374 for (i = 0; i < Num_symlink_files; i++) {
1375 Symlinks[i].dir = &Export_dir;
1380 * allocate and initialize client handles
1386 * Set up the client handles. We get them all before trying one
1387 * out to ensure that the client handle for LOOKUP class is allocated
1388 * before calling op_getattr().
/*
 * init_rpc -- create the child's NFS RPC client handle and its
 * AUTH_UNIX credentials, then optionally start the biod simulation.
 * NOTE(review): the function header line and the error branch after
 * the NULL-handle check fall in gaps of this excerpt.
 */
1390 if (DEBUG_CHILD_GENERAL) {
1391 (void) fprintf(stderr, "%s: set up client handle\n", sfs_Myname);
/* TCP or UDP transport chosen by the Tcp flag; per-op timeouts come
 * from the Nfs_timers table. */
1394 NFS_client = lad_clnt_create(Tcp? 1: 0, Server_hostent,
1395 (uint32_t) NFS_PROGRAM,
1396 (uint32_t) nfs_version,
1397 RPC_ANYSOCK, &Nfs_timers[0]);
1399 if (NFS_client == ((CLIENT *) NULL)) {
1404 * create credentials using the REAL uid
/* AUTH_UNIX: hostname, real uid, current gid, no supplementary groups. */
1406 NFS_client->cl_auth = authunix_create(lad_hostname, (int)Real_uid,
1407 (int)Cur_gid, 0, NULL);
1409 /* Initialize biod simulation mechanism if desired. */
1410 if (Biod_max_outstanding_reads > 0 || Biod_max_outstanding_writes > 0) {
1411 if (biod_init(Biod_max_outstanding_writes,
1412 Biod_max_outstanding_reads) == -1) {
1421 * Initialize the test directory 'parentdir'/testdir'dirnum'.
1423 * If the directory already exists, check to see that all of the
1424 * files exist and can be written. If the directory doesn't exist
1425 * create it and fill it with the proper files. The caller is
1426 * left with his cwd being the test directory.
1428 * Each child pseudo-mount's his own test directory to get its filehandle.
1430 * Files, directories, and symlinks all have the same name structure
1431 * but they are strictly ordered, files first, directories next, then symlinks.
1432 * While initializing after a previous run we may have to delete existing
1433 * files of the wrong type and then create them later.
1435 * XXX In the future it is probably wiser to have separate namespaces for
1436 * each type of file.
/*
 * NOTE(review): the function header and many interior lines (else
 * branches, closing braces) fall in gaps of this excerpt; comments
 * below describe only what is visible.
 */
1448 int alloc_count, dealloc_count;
1450 * Create directories first so operations that
1451 * require them will have a file to work with.
/* Phase 1: directories.  For each slot: lazily allocate fh_data,
 * LOOKUP the name, remove anything of the wrong type or anything that
 * should not exist, and MKDIR the slots marked for initialization. */
1453 alloc_count=dealloc_count=0;
1454 for (filenum = 0; filenum < Num_dir_files; filenum++) {
1455 sfs_gettime(&Cur_time);
1457 Cur_file_ptr = &Dirs[filenum];
1459 if(Cur_file_ptr->fh_data == (sfs_fh_data *)0)
/* NOTE(review): calloc result is not checked here (nor at the two
 * analogous sites below); other allocations in this file abort via
 * generic_kill on failure -- confirm intent. */
1462 Cur_file_ptr->fh_data = calloc(1,sizeof(sfs_fh_data));
1463 Cur_file_ptr->attributes2.type = NFNON;
1464 Cur_file_ptr->attributes3.type = NF3NON;
1465 if(Cur_file_ptr->working_set == 1)
/* NOTE(review): sprintf with no bound; safe only if Dirspec plus the
 * number fits Cur_filename -- confirm buffer size. */
1471 (void) sprintf(Cur_filename, Dirspec, Cur_file_ptr->unique_num);
1473 if (DEBUG_CHILD_SETUP) {
1474 (void) fprintf(stderr, "%s: initialize %s (DIR)\n",
1475 sfs_Myname, Cur_filename);
1476 (void) fflush(stderr);
1479 if ((ret = lad_lookup(Cur_file_ptr, Cur_filename)) == -1) {
1480 /* some error that I don't know what to do with, quit. */
1481 (void) generic_kill(0, SIGINT);
/* Name exists and is already a directory slated for initialization:
 * keep it, release the scratch fh_data. */
1487 if (fh_isdir(Cur_file_ptr) && Cur_file_ptr->initialize)
1492 free(Cur_file_ptr->fh_data);
1493 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
/* Wrong type (or stale) entry under this name: remove it. */
1498 if (lad_remove(Cur_file_ptr, Cur_filename) != 0) {
1499 /* some error that I don't know what to do with, quit. */
1500 (void) generic_kill(0, SIGINT);
1505 if (!Cur_file_ptr->initialize) {
1506 /* dir shouldn't exist */
1510 free(Cur_file_ptr->fh_data);
1511 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1516 /* make the directory */
1517 if (lad_mkdir(Cur_file_ptr, Cur_filename) == -1) {
1518 /* some error that I don't know what to do with, quit. */
1519 (void) generic_kill(0, SIGINT);
1525 free(Cur_file_ptr->fh_data);
1526 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1528 } /* end for each directory */
1531 * Setup for file i/o operations.
1532 * Verify that we can read and write all the files.
1533 * Make sure we have the attributes && fh for all regular files.
1534 * Create any missing files.
/* Phase 2: regular files.  Io_files come first, then Non_io_files;
 * i/o files are prefilled to init_size, non-i/o files left empty. */
1536 max_filenum = Num_io_files + Num_non_io_files;
1537 alloc_count=dealloc_count=0;
1538 for (filenum = 0; filenum < max_filenum; filenum++) {
1539 sfs_gettime(&Cur_time);
1541 if (filenum < Num_io_files) {
1542 Cur_file_ptr = &Io_files[filenum];
1544 Cur_file_ptr = &Non_io_files[filenum - Num_io_files];
1547 (void) sprintf(Cur_filename, Filespec, Cur_file_ptr->unique_num);
1549 if(Cur_file_ptr->fh_data == (sfs_fh_data *)0)
1552 Cur_file_ptr->fh_data = calloc(1,sizeof(sfs_fh_data));
1553 Cur_file_ptr->attributes2.type = NFNON;
1554 Cur_file_ptr->attributes3.type = NF3NON;
1555 if(Cur_file_ptr->working_set == 1)
1562 * Get the size this file should be initialized to, then reset
1563 * so we don't get confused.
1565 init_size = Cur_file_ptr->size;
1566 Cur_file_ptr->size = 0;
1568 if (DEBUG_CHILD_SETUP) {
1569 (void) fprintf(stderr, "%s: initialize %s (REG for %sIO)\n",
1570 sfs_Myname, Cur_filename,
1571 (non ? "non-": ""));
1572 (void) fflush(stderr);
1575 if ((ret = lad_lookup(Cur_file_ptr, Cur_filename)) == -1) {
1576 /* some error that I don't know what to do with, quit. */
1577 (void) generic_kill(0, SIGINT);
1583 * If file exists and it shouldn't, remove it
1585 if (!Cur_file_ptr->initialize) {
1586 if (lad_remove(Cur_file_ptr, Cur_filename) != 0) {
1587 /* some error that I don't know what to do with, quit. */
1588 (void) generic_kill(0, SIGINT);
1594 free(Cur_file_ptr->fh_data);
1595 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1601 * file exists: make sure it is
1603 * - accessible (permissions ok)
1604 * if not, remove it (if necessary) and recreate it
1605 * or extend or truncate it to the standard length.
1607 if (fh_isfile(Cur_file_ptr) &&
1608 check_fh_access(Cur_file_ptr) == 0) {
1611 if (lad_remove(Cur_file_ptr, Cur_filename) != 0) {
1612 /* some error that I don't know what to do with, quit. */
1613 (void) generic_kill(0, SIGINT);
1617 } /* end if the file exists */
1619 /* the file doesn't exist */
1620 if (!Cur_file_ptr->initialize) {
1621 /* file doesn't exist and it shouldn't */
1625 free(Cur_file_ptr->fh_data);
1626 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1631 /* if the file doesn't exist (or was removed), create it */
1632 if (lad_create(Cur_file_ptr, Cur_filename) == -1) {
1633 /* some error that I don't know what to do with, quit. */
1634 (void) generic_kill(0, SIGINT);
1639 /* the non-i/o regular files can be left empty */
1640 if (filenum >= Num_io_files) {
1641 /* Truncate if it has grown */
1642 if (fh_size(Cur_file_ptr) != 0) {
1643 if (lad_truncate(Cur_file_ptr, 0)) {
1644 /* some error that I don't know what to do with, quit. */
1645 (void) generic_kill(0, SIGINT);
1652 free(Cur_file_ptr->fh_data);
1653 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1658 /* the i/o file must be prefilled, check if file too big */
1659 if (fh_size(Cur_file_ptr) > init_size) {
1660 /* Truncate if it has grown */
1661 if (fh_size(Cur_file_ptr) != 0) {
1662 if (lad_truncate(Cur_file_ptr, init_size)) {
1663 /* some error that I don't know what to do with, quit. */
1664 (void) generic_kill(0, SIGINT);
1671 free(Cur_file_ptr->fh_data);
1672 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1677 /* the i/o file must be prefilled, set up the write arguments. */
/* Extend the file by writing the missing bytes at the current EOF. */
1678 if (fh_size(Cur_file_ptr) < init_size) {
1679 append_size = init_size - fh_size(Cur_file_ptr);
1681 if (lad_write(Cur_file_ptr, fh_size(Cur_file_ptr), append_size)) {
1682 /* some error that I don't know what to do with, quit. */
1683 (void) generic_kill(0, SIGINT);
1690 free(Cur_file_ptr->fh_data);
1691 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1693 } /* end for each regular file */
1696 * Create symlinks so operations that
1697 * require them will have a file to work with.
/* Phase 3: symlinks.  Same lookup/remove/create pattern; each link's
 * target name is generated from Filespec and the slot number. */
1699 alloc_count=dealloc_count=0;
1700 for (filenum = 0; filenum < Num_symlink_files; filenum++) {
1701 char symlink_target[SFS_MAXNAMLEN];
1703 sfs_gettime(&Cur_time);
1705 Cur_file_ptr = &Symlinks[filenum];
1706 (void) sprintf(Cur_filename, Symspec, Cur_file_ptr->unique_num);
1709 if(Cur_file_ptr->fh_data == (sfs_fh_data *)0)
1712 Cur_file_ptr->fh_data = calloc(1,sizeof(sfs_fh_data));
1713 Cur_file_ptr->attributes2.type = NFNON;
1714 Cur_file_ptr->attributes3.type = NF3NON;
1715 if(Cur_file_ptr->working_set == 1)
1720 if (DEBUG_CHILD_SETUP) {
1721 (void) fprintf(stderr, "%s: initialize %s (SYMLINK)\n",
1722 sfs_Myname, Cur_filename);
1723 (void) fflush(stderr);
1726 if ((ret = lad_lookup(Cur_file_ptr, Cur_filename)) == -1) {
1727 /* some error that I don't know what to do with, quit. */
1728 (void) generic_kill(0, SIGINT);
1734 if (lad_remove(Cur_file_ptr, Cur_filename) != 0) {
1735 /* some error that I don't know what to do with, quit. */
1736 (void) generic_kill(0, SIGINT);
1741 /* File doesn't exist */
1742 if (Cur_file_ptr->initialize) {
1743 /* make the symlink */
1744 (void) sprintf(symlink_target, Filespec, filenum);
1745 if (lad_symlink(Cur_file_ptr, symlink_target, Cur_filename) != 0) {
1746 /* some error that I don't know what to do with, quit. */
1747 (void) generic_kill(0, SIGINT);
1754 free(Cur_file_ptr->fh_data);
1755 Cur_file_ptr->fh_data=(sfs_fh_data *)0;
1757 } /* end for each symlink */
1758 } /* init_testdir */
1761 * Initialize the test results counters.
/*
 * init_counters -- zero per-op result counters, per-file use counts,
 * and the period/test timing state before a run begins.
 * NOTE(review): the function header and closing brace fall in gaps
 * of this excerpt.
 */
1769 /* Ready to go - initialize operation counters */
/* NOPS + 1 covers every op slot plus the TOTAL aggregate entry. */
1770 for (i = 0; i < NOPS + 1; i++) {
1772 Ops[i].results.good_calls = 0;
1773 Ops[i].results.bad_calls = 0;
1774 Ops[i].results.fast_calls = 0;
1775 Ops[i].results.time.sec = 0;
1776 Ops[i].results.time.usec = 0;
1777 Ops[i].results.msec2 = 0;
1780 /* initialize use count for each file */
1781 for (i = 0; i < Num_io_files; i++) {
1782 Io_files[i].use_cnt = 0;
1783 Io_files[i].xfer_cnt = 0;
1785 for (i = 0; i < Num_non_io_files; i++)
1786 Non_io_files[i].use_cnt = 0;
1787 for (i = 0; i < Num_dir_files; i++)
1788 Dirs[i].use_cnt = 0;
1789 for (i = 0; i < Num_symlink_files; i++)
1790 Symlinks[i].use_cnt = 0;
1792 /* initialize timers and period variables */
/* start_msec is wall-clock start in milliseconds; checkpoint and
 * per-period counters are reset relative to it. */
1793 sfs_gettime(&Starttime);
1794 Cur_time = Starttime;
1795 start_msec = (Starttime.sec * 1000) + (Starttime.usec / 1000);
1796 Previous_chkpnt_msec = start_msec;
1797 Calls_this_period = 0;
1798 Reqs_this_period = 0;
1799 Sleep_msec_this_period = 0;
1800 Calls_this_test = 0;
1802 Sleep_msec_this_test = 0;
1808 * ------------------------- Load Generation -------------------------
1812 * The routines below attempt to do over-the-wire operations.
1813 * Each op tries to cause one or more of a particular
1814 * NFS operation to go over the wire. Each individual op routine
1815 * returns how many OTW calls were made.
1817 * An array of file information is kept for files existing in
1818 * the test directory. File handles, attributes, names, etc
1819 * are stored in this array.
1824 #define OP_ABORTED (-1)
1825 #define OP_BORROWED (-2)
1826 #define OP_SKIPPED (-3)
1828 * Randomly perform an operation according to the req mix weightings.
/*
 * do_op -- pick an operation from the weighted mix, invoke it via op(),
 * and account for the outcome.  op() returns >0 (calls made), 0
 * (failed), or the OP_ABORTED / OP_BORROWED / OP_SKIPPED sentinels.
 * NOTE(review): the function header, local declarations, and several
 * branch/closing lines fall in gaps of this excerpt.
 */
1837 static int failed_ops = 0;
1838 static int aborted_ops = 0;
1839 static int borrowed_ops = 0;
/* Testop mode: a single fixed operation is exercised directly. */
1842 if (DEBUG_CHILD_OPS) {
1843 (void) fprintf(stderr, "testop start op=%s\n", Ops[Testop].name);
1845 op_count = op(Testop);
1846 if (DEBUG_CHILD_OPS) {
1847 (void) fprintf(stderr, "end op=%s\n", Ops[Testop].name);
1852 /* get a random number and search the Ops tables for the proper entry */
/* ratio is 0..9999; req_pcnt is a cumulative percentage, so the first
 * entry whose req_pcnt exceeds ratio/100 is the chosen op. */
1853 ratio = sfs_random() % 10000;
1854 for (opnum = 0; Ops[opnum].req_pcnt <= ratio / 100.0 ; opnum++) {
1859 * If test targeted a specific number of ops,
1860 * and the call would put us over the call target for this op,
1861 * search Ops table sequentially for an op that hasn't
1862 * reached its target yet
1865 start_opnum = opnum;
1866 for (; Ops[opnum].results.good_calls >= Ops[opnum].target_calls;) {
1867 opnum = (opnum + 1) % NOPS;
1868 if (opnum == start_opnum)
1873 if (DEBUG_CHILD_RPC) {
1874 (void) fprintf(stderr, "(%d,%d,%d) ",
1875 Child_num, Ops[TOTAL].results.good_calls, opnum);
1876 (void) fflush(stderr);
1879 /* attempt the op */
1880 op_count = op(opnum);
1882 /* count the operations as completed or check for too many errors */
1884 Ops[opnum].req_cnt++;
1885 Ops[TOTAL].req_cnt++;
1886 } else if (op_count == 0) {
1888 if (DEBUG_CHILD_OPS) {
1889 (void) fprintf(stderr, "Child %d - %d failed %d op\n",
1890 Child_num, failed_ops, opnum);
1891 (void) fflush(stderr);
/* Periodic progress warning: every 50th failure is always reported. */
1893 if ((failed_ops % 50) == 0) {
1894 (void) fprintf(stderr, "Child %d - %d failed ops\n",
1895 Child_num, failed_ops);
1896 (void) fflush(stderr);
1898 } else if (op_count == OP_ABORTED) {
1900 if (DEBUG_CHILD_OPS) {
1901 (void) fprintf(stderr, "Child %d - %d aborted %d op\n",
1902 Child_num, aborted_ops, opnum);
1903 (void) fflush(stderr);
1905 if ((aborted_ops % 50) == 0) {
1906 (void) fprintf(stderr, "Child %d - %d aborted ops\n",
1907 Child_num, aborted_ops);
1908 (void) fflush(stderr);
1910 } else if (op_count == OP_BORROWED) {
1912 if (DEBUG_CHILD_OPS) {
1913 (void) fprintf(stderr, "Child %d - %d borrowed %d op\n",
1914 Child_num, borrowed_ops, opnum);
1915 (void) fflush(stderr);
1917 } else if (op_count == OP_SKIPPED) {
1918 if (DEBUG_CHILD_OPS) {
1919 (void) fprintf(stderr, "Child %d - skipped %d op\n",
1921 (void) fflush(stderr);
1931 * Because file sizes are variable in length, it is possible that
1932 * a group chosen for a large transfer size may not contain a file
1933 * that large. Loop calling randfh to try and find another group
1934 * with a large enough file, but only up to IO_LOOP_MAX times.
1936 #define IO_LOOP_MAX 5
1939 * Call the RPC operation generator for op 'opnum'.
1940 * The return values of the op generator routines is the count
1941 * of operations performed. This routine also returns that count.
1942 * A return of 0 means no operation was attempted,
1943 * OP_ABORTED (-1) means that the operation failed.
1944 * OP_BORROWED (-2) means that the operation was borrowed.
1945 * OP_SKIPPED (-3) means that the operation was not done on purpose.
/*
 * NOTE(review): the function header, several case labels, and many
 * closing braces fall in gaps of this excerpt; comments below cover
 * only what is visible.
 */
1953 sfs_io_op_dist_type *dist; /* io size distribution */
1961 uint_t append_flag = 0;
1962 uint_t randfh_flags = 0;
1968 /* pick a file that make sense for the operation */
1972 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_io_file);
1976 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_io_file);
/* SETATTR may have been consumed earlier as a borrowed truncation. */
1980 if (Setattr_borrowed != 0) {
1982 return(OP_BORROWED);
1984 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_io_file);
1988 Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent, Sfs_non_io_file);
/* A configured fraction of LOOKUPs deliberately target missing names. */
1992 ratio = (int) (sfs_random() % 100);
1993 if (ratio < Num_failed_lookup)
1994 Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent, Sfs_non_io_file);
1996 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_io_file);
2000 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_symlink);
2005 /* special handling for i/o operations */
2006 dist = Io_dist_ptr->read;
2008 /* determine number of full buffers and their total size */
/* dist[].pcnt is cumulative; pick the first bucket above ratio. */
2009 ratio = (sfs_random() % 100);
2010 for (i = 0; dist[i].pcnt <= ratio; i++)
2012 buf_size = dist[i].bufs * Bytes_per_block;
2014 /* determine size of fragment */
2015 /* 1KB - (Kb_per_block - 1) KB fragment */
2016 ratio = sfs_random();
2017 if (Kb_per_block > 1)
2018 ratio = ratio % (Kb_per_block-1);
2021 ratio = (ratio + 1) * 1024;
2022 frag_size = dist[i].frags * ratio;
2024 xfer_size = buf_size + frag_size;
/* Retry up to IO_LOOP_MAX times if no file in the chosen group is
 * large enough (randfh returns (sfs_fh_type *) -1 in that case). */
2027 Cur_file_ptr = randfh(opnum, xfer_size, 0, Exists,
2029 } while (Cur_file_ptr == (sfs_fh_type *) -1 &&
2030 io_loop++ < IO_LOOP_MAX);
2034 Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent, Sfs_non_io_file);
2038 /* special handling for i/o operations */
2039 dist = Io_dist_ptr->write;
2041 /* determine number of full buffers and their total size */
2042 ratio = (sfs_random() % 100);
2043 for (i = 0; dist[i].pcnt <= ratio; i++)
2045 buf_size = dist[i].bufs * Bytes_per_block;
2047 /* determine size of fragment */
2048 /* 1KB - (Kb_per_block - 1) KB fragment */
2049 ratio = sfs_random();
2050 if (Kb_per_block > 1)
2051 ratio = ratio % (Kb_per_block-1);
2054 ratio = (ratio + 1) * 1024;
2055 frag_size = dist[i].frags * ratio;
2057 xfer_size = buf_size + frag_size;
2059 /* decide if it should append or overwrite. */
2060 ratio = (sfs_random() % 100);
2061 if (ratio < Append_percent) {
/* NOTE(review): randfh_flags was initialized to 0 above, so "&="
 * always leaves it 0 -- this looks like it should be "|=" to set the
 * RANDFH_APPEND bit.  Confirm against the full source before fixing. */
2063 randfh_flags &= RANDFH_APPEND;
2066 /* decide if a truncation will be needed */
2068 ((Cur_fss_bytes + (xfer_size / 1024)) > Limit_fss_bytes)) {
/* NOTE(review): same "&=" vs "|=" concern as RANDFH_APPEND above. */
2069 randfh_flags &= RANDFH_TRUNC;
2073 Cur_file_ptr = randfh(opnum, xfer_size,
2075 Exists, Sfs_io_file);
2076 } while (Cur_file_ptr == (sfs_fh_type *) -1 &&
2077 io_loop++ < IO_LOOP_MAX);
2081 if (Create_borrowed != 0) {
2083 return(OP_BORROWED);
2085 if ((Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent,
2086 Sfs_non_io_file)) != (sfs_fh_type *) NULL)
2089 /* if there are no Nonexistent files, use one that exists */
2090 Cur_file_ptr = randfh(opnum, 0, 0, Exists,
2092 /* flag create of existing file for data dump interface */
2093 dump_create_existing_file = TRUE;
2097 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_non_io_file);
2101 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_non_io_file);
2105 Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent,
2110 Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent, Sfs_symlink);
2115 Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent, Sfs_dir);
2120 Cur_file_ptr = randfh(opnum, 0, 0, Empty_dir, Sfs_dir);
2125 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_dir);
2130 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_io_file);
2134 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_io_file);
2141 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_non_io_file);
2145 Cur_file_ptr = randfh(opnum, 0, 0, Nonexistent, Sfs_non_io_file);
2149 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_non_io_file);
2153 Cur_file_ptr = randfh(opnum, 0, 0, Exists, Sfs_dir);
2158 (void) fprintf(stderr, "%s: invalid operation %d\n", sfs_Myname, opnum);
2159 (void) generic_kill(0, SIGINT);
2161 } /* switch on opnum */
/* randfh can yield NULL (no eligible file) or -1 (no file large
 * enough); either way the op is skipped and counted in no_calls. */
2163 if (Cur_file_ptr == (sfs_fh_type *) NULL ||
2164 Cur_file_ptr == (sfs_fh_type *) -1) {
2165 Ops[opnum].no_calls++;
2169 (void) sprintf(Cur_filename, spec, Cur_file_ptr->unique_num);
2171 /* Call the op routine. For io operations, maintain file set size info. */
2175 op_count = (*Ops[opnum].funct)(-1);
2179 op_count = (*Ops[opnum].funct)(xfer_size);
/* Track KB transferred, rounding each transfer up to a whole KB. */
2181 Cur_file_ptr->xfer_cnt += (xfer_size + 1023) / 1024;
2182 else if (DEBUG_CHILD_ERROR) {
2183 (void) fprintf(stderr, "%s: READ failed\n", sfs_Myname);
2184 (void) fflush(stderr);
2191 /* if appending, we may need to truncate the file first */
2193 ((Cur_fss_bytes + (xfer_size / 1024)) > Limit_fss_bytes)) {
2195 /* use either SETATTR or CREATE for truncation */
2196 file_size = fh_size(Cur_file_ptr);
2197 trunc_op = -1; /* assume there are no ops to borrow */
2199 if (Ops[SETATTR].mix_pcnt == 0 && Ops[CREATE].mix_pcnt == 0)
2200 trunc_op = -1; /* no ops to borrow */
2202 else if (Ops[SETATTR].mix_pcnt > 0 && Ops[CREATE].mix_pcnt > 0){
2203 /* only borrow if the target hasn't been met yet */
2204 if (Ops[SETATTR].results.good_calls
2205 >= Ops[SETATTR].target_calls) {
2206 if (Ops[CREATE].results.good_calls
2207 < Ops[CREATE].target_calls) {
2208 trunc_op = CREATE; /* borrow a CREATE */
2210 } else if (Ops[CREATE].results.good_calls
2211 >= Ops[CREATE].target_calls) {
2212 trunc_op = SETATTR; /* borrow a SETATTR */
2214 /* borrow weighted by mix percentage */
2215 if ((Ops[SETATTR].mix_pcnt * Create_borrowed) >
2216 (Ops[CREATE].mix_pcnt * Setattr_borrowed))
2222 } else if (Ops[SETATTR].results.good_calls <
2223 Ops[SETATTR].target_calls) {
2224 /* only borrow if the target hasn't been met yet */
2225 trunc_op = SETATTR; /* borrow a SETATTR */
2227 } else if (Ops[CREATE].results.good_calls <
2228 Ops[CREATE].target_calls) {
2229 /* only borrow if the target hasn't been met yet */
2230 trunc_op = CREATE; /* borrow a CREATE */
2233 /* perform the truncation and update the file set size */
2234 if (trunc_op != -1) {
2235 dump_truncate_op = TRUE;
2236 if (trunc_op == SETATTR) {
2237 trunc_count = (*Ops[SETATTR].funct)(0);
2238 if (trunc_count > 0) {
2240 if (DEBUG_CHILD_FILES) {
2241 (void) fprintf(stderr, "%s: SETATTR TRUNCATE\n",
2243 (void) fflush(stderr);
2246 } else if (trunc_op == CREATE) {
2247 trunc_count = (*Ops[CREATE].funct)();
2248 if (trunc_count > 0) {
2250 if (DEBUG_CHILD_FILES) {
2251 (void) fprintf(stderr, "%s: CREATE TRUNCATE\n",
2253 (void) fflush(stderr);
/* Truncation shrinks the tracked file-set size; remember the low
 * watermark. */
2258 Cur_fss_bytes -= (file_size / 1024);
2259 if (Cur_fss_bytes < Least_fss_bytes)
2260 Least_fss_bytes = Cur_fss_bytes;
2262 } /* end of if an append is needed */
2265 * do the write request
2266 * specify the stable flag to always be off, it is not used
2269 op_count = (*Ops[opnum].funct)(xfer_size, append_flag, 0);
2271 Cur_file_ptr->xfer_cnt += (xfer_size + 1023) / 1024;
2272 else if (DEBUG_CHILD_ERROR) {
2273 (void) fprintf(stderr, "%s: WRITE failed\n", sfs_Myname);
2274 (void) fflush(stderr);
/* Writes grow the tracked file-set size; remember the high watermark. */
2277 Cur_fss_bytes += (xfer_size / 1024);
2278 if (Cur_fss_bytes > Most_fss_bytes)
2279 Most_fss_bytes = Cur_fss_bytes;
/* Borrowed truncation calls count toward this op's total. */
2281 op_count += trunc_count;
2285 op_count = (*Ops[opnum].funct)();
2288 } /* send switch on opnum */
2290 if ((DEBUG_CHILD_ERROR) && (op_count <= 0)) {
2291 (void) fprintf(stderr, "%s: OP %d failed\n", sfs_Myname, opnum);
2292 (void) fflush(stderr);
2301 * Return an entry into the fh array for a file of type 'file_type'
2302 * with existence state 'file_state'. When 'opnum' specifies an I/O
2303 * operation, the file must be atleast 'xfer_size' bytes long
2304 * (except when 'append_flag' is true). If 'trunc_flag', spare the
2305 * first file found that is longer than the base file size (to support
2306 * the READ operation). If only one file is longer than the base file
2307 * size, return the the next longest file.
2314 sfs_state_type file_state,
2315 sfs_file_type file_type)
2317 sfs_fh_type * files; /* file array */
2318 int fh; /* index into file array */
2319 int found_fh = -1; /* index into file array */
2320 uint_t append_flag = flags & RANDFH_APPEND;
2321 uint_t trunc_flag = flags & RANDFH_TRUNC;
2323 sfs_work_set_type * work_set; /* work_set array */
2324 int start_file; /* index into work_set array */
2325 int file; /* index into work_set array */
2327 int nworkfiles; /* # files in work_set */
2328 int group_cnt; /* # file groups in work_set */
2329 int group_size; /* size of each group in work_set */
2330 int group; /* group index with work_set */
2331 int offset; /* file index within group */
2333 int value; /* distribution function value */
2334 int previous; /* binary search for value */
2335 int low; /* binary search for value */
2336 int high; /* binary search for value */
2338 int found_file = 0; /* count */
2340 static int op_num = 0;
2347 * if the more than one type of file will do, choose one.
2348 * Note: this code assumes specific values and order for
2349 * the entries in sfs_file_enum_type.
2351 switch (file_type) {
2354 file_type = (int) (sfs_random() % 2);
2358 file_type = (int) (sfs_random() % 3);
2362 file_type = (int) (sfs_random() % 4);
2368 } /* end switch on file type */
2370 /* get the file type arrays */
2371 switch (file_type) {
2375 work_set = &Io_working_set;
2376 nworkfiles = Num_working_io_files;
2379 case Sfs_non_io_file:
2380 files = Non_io_files;
2381 work_set = &Non_io_working_set;
2382 nworkfiles = Num_working_non_io_files;
2387 work_set = &Symlink_working_set;
2388 nworkfiles = Num_working_symlinks;
2393 work_set = &Dir_working_set;
2394 nworkfiles = Num_working_dirs;
2398 (void) fprintf(stderr, "%s: invalid file type\n", sfs_Myname);
2399 (void) kill(0, SIGINT);
2401 } /* end switch on file type */
2404 * Pick the access group.
2406 * Each access group consists of those files in the working set
2407 * (numbered according to the file's index in the array) that
2408 * have the same value modulo the number of groups. For example,
2409 * a working set of 13 files with 3 groups is organized as
2411 * ----- -----------------
2412 * 0 0, 3, 6, 9, 12 ie, == 0 mod 3
2413 * 1 1, 4, 7, 10 ie, == 1 mod 3
2414 * 2 2, 5, 8, 11 ie, == 2 mod 3
2416 * Generate a random number mod the maximum range value of the working set.
2417 * and then binary search the first group_cnt entries in the working set
2418 * to find the group whose range contains the random number.
2419 * (this implements the file access distribution function)
2421 max_range = work_set->max_range;
2422 rand_int = (long) sfs_random();
2424 while ((rand_int / max_range) >= (_M_MODULUS / max_range)) {
2426 * for large values of max_range, modulo doesn't provide a uniform
2427 * distribution unless we exclude these values ...
2429 rand_int = (long) sfs_random();
2431 value = rand_int % max_range;
2433 if (DEBUG_CHILD_OPS) {
2434 (void) fprintf(stderr, "randfh: size=%d cnt=%d max=%d val=%3d\n",
2435 work_set->access_group_size,
2436 work_set->access_group_cnt,
2437 work_set->max_range, value);
2438 (void) fflush(stderr);
2442 for (low = 0, high = work_set->access_group_cnt-1, group = (low + high)/2;;
2443 previous = group, group = (low + high)/2) {
2445 if (DEBUG_CHILD_OPS) {
2446 (void) fprintf(stderr,
2447 "PICK GROUP low=%d hi=%d grp=%d range=%d val=%d\n",
2448 low, high, group, work_set->entries[group].range,
2450 (void) fflush(stderr);
2453 if (previous == group)
2455 if (work_set->entries[group].range == value)
2457 if (work_set->entries[group].range > value) {
2460 if (work_set->entries[group-1].range < value)
2464 } else if (work_set->entries[group].range < value) {
2465 if (work_set->entries[group+1].range > value) {
2474 * Pick a file within the group to operate on.
2475 * Since (working_set_size / group_size) may have a remainder,
2476 * groups may have either group_size or (group_size+1) files.
2478 group_cnt = work_set->access_group_cnt;
2479 group_size = work_set->access_group_size;
2480 if (group < (nworkfiles - ((nworkfiles / group_cnt) * group_cnt)))
2483 if (DEBUG_CHILD_OPS) {
2484 (void) fprintf(stderr, "Selected group = %d\n", group);
2485 (void) fflush(stderr);
2488 * Beginning with a random starting point in the group,
2489 * search for a file that is eligible for this operation.
2490 * index is an index into the files in the group.
2491 * file and start_file are indices into the working set array.
2493 if (DEBUG_CHILD_OPS) {
2494 (void) fprintf(stderr, "group_size = %d\n", group_size);
2495 (void) fflush(stderr);
2498 offset = (int) (sfs_random() % group_size);
2499 start_file = group + (offset * group_cnt);
2505 fh = work_set->entries[file].index;
2507 if (DEBUG_CHILD_OPS) {
2508 (void) fprintf(stderr, "PICK FILE op= %d file=%d fh=%d\n",
2510 (void) fprintf(stderr, "fh_state = %d file_state= %d\n",
2511 files[fh].state, file_state);
2512 (void) fflush(stderr);
2515 /* look for a file that has the right state attribute */
2516 if (files[fh].state == file_state) {
2517 f_size = fh_size(&files[fh]);
2520 * for read and write ops and setattr truncates,
2521 * the file must be large enough to do the xfer or truncate.
2523 if ((opnum == READ) || (opnum == WRITE && !append_flag) ||
2527 * If the request is a read and the transfer size is
2528 * less than or equal to be block size, grab the first
2529 * file that is less than or equal in size. Should never
2530 * see a transfer size less than block size as it will
2531 * be rounded up for the request. This allows small files
2534 if (opnum == READ && xfer_size <= Bytes_per_block) {
2535 if (f_size <= Bytes_per_block) {
2540 /* #define FIRST_FIT */
2543 if (f_size >= xfer_size) {
2549 if (DEBUG_CHILD_FIT) {
2550 (void) fprintf(stderr,
2551 "%s: %8d: xfer_size %d f_size %d best_delta %d found %d\n",
2552 sfs_Myname, op_num, xfer_size, f_size, best_delta, found_file);
2553 (void) fflush(stderr);
2557 * If we find a good enough match we should use it.
2558 * Define good enough to be xfer_size <= X < xfer_size + 8K
2559 * If not we continue to search for the best fit within
2560 * a fixed distance 8.
2562 if (f_size >= xfer_size) {
2563 if (f_size < (xfer_size + 8 * 1024)) {
2569 delta = f_size - xfer_size;
2571 if (found_fh == -1) {
2574 /* break; Removed as per Robinson */
2577 if (delta < best_delta) {
2582 if (found_file >= 8) {
2588 /* for non-i/o ops, only requirement is proper file state */
2593 offset = (offset + 1) % group_size;
2594 file = group + (offset * group_cnt);
2595 } while (file != start_file);
2597 if (found_fh == -1) {
2598 /* didn't find a file for this operation */
2599 if (DEBUG_CHILD_FIT) {
2600 if (opnum == READ || (opnum == WRITE && !append_flag) ||
2602 (void) fprintf(stderr, "%s: no file for %d byte %d op\n",
2603 sfs_Myname, xfer_size, opnum);
2605 (void) fprintf(stderr, "%s: no file for %d op\n",
2608 (void) fflush(stderr);
2609 return((sfs_fh_type *) -1);
2611 return((sfs_fh_type *) NULL);
2614 files[found_fh].use_cnt++;
2615 return(&files[found_fh]);
2619 * ------------------------ Miscellaneous Subroutines -----------------------
2623 * check to make sure that we have both read and write permissions
2624 * for this file or directory given in 'statb'.
2625 * return: 0 == ok, -1 == bad
 * NOTE(review): this excerpt is elided — the function signature (around
 * original line 2628) and the return/else lines between the checks are
 * not visible here; only the three permission tests survive.
 *
 * Owner check: caller's real uid owns the file — require both
 * owner-read (0400) and owner-write (0200) mode bits.
2632 if (statb->st_uid == Real_uid) {
2633 if ((statb->st_mode & 0400) && (statb->st_mode & 0200)) {
 * Group check: file's group matches the current gid — require
 * group-read (040) and group-write (020).
2641 if (statb->st_gid == Cur_gid) {
2642 if ((statb->st_mode & 040) && (statb->st_mode & 020)) {
 * Fallback "other" check: require other-read (04) and other-write (02).
2650 if ((statb->st_mode & 04) && (statb->st_mode & 02)) {
2656 } /* check_access */
2659 * check to make sure that we have both read and write permissions
2660 * for this file or directory given in the file attributes.
2661 * return: 0 == ok, -1 == bad
 * Same three-tier test as check_access(), but driven by the benchmark's
 * cached file-handle attributes (fh_uid/fh_gid/fh_mode accessors) instead
 * of a struct stat. NOTE(review): lines are elided in this excerpt — the
 * return statements between the checks and the closing brace are not
 * visible here.
2664 check_fh_access(sfs_fh_type *file_ptr)
 * Owner check: require owner-read (0400) and owner-write (0200).
2667 if (fh_uid(file_ptr) == Real_uid) {
2668 if ((fh_mode(file_ptr) & 0400) && (fh_mode(file_ptr) & 0200)) {
 * Group check: require group-read (040) and group-write (020).
2676 if (fh_gid(file_ptr) == Cur_gid) {
2677 if ((fh_mode(file_ptr) & 040) && (fh_mode(file_ptr) & 020)) {
 * Fallback "other" check: require other-read (04) and other-write (02).
2685 if ((fh_mode(file_ptr) & 04) && (fh_mode(file_ptr) & 02)) {
 * Running total of failed RPC calls already reported, so the "too many
 * failed calls" warning below fires on the delta since the last warning,
 * not on the absolute count.
2692 static int last_bad_calls = 0;
2695 * Adjust the sleep time per call based on a number of global variables,
 * NOTE(review): the function signature (check_call_rate, per the trailer
 * comment at the end) and several interior lines are elided in this
 * excerpt; comments below describe only the visible code.
2700 int call_target_per_period; /* target calls for each period */
2701 int req_target_per_period; /* target reqs for each period */
2702 int call_target_this_test; /* target calls for test so far */
2703 int req_target_this_test; /* target reqs for test so far */
2704 int msec_this_period; /* actual length of this period */
2705 int msec_this_test; /* actual length of test so far */
2706 uint_t current_msec; /* current time in msecs */
2707 int old_target_sleep_mspc;
2708 struct ladtime elapsed_time; /* Current_time - Start_time */
2710 int reqs_needed_next_period;/* req target for the next period */
2711 float mspc; /* target msec per call, with/sleep */
2712 float work_mspc; /* actual msec worked / call */
 * Child_num == -1 identifies the parent process; it shares the signal
 * handler but must not adjust child-local pacing state.
2715 if (Child_num == -1)
2716 /* I'm the parent, ignore the signal */
2719 /* update the test so far totals */
2720 Calls_this_test += Calls_this_period;
2721 Reqs_this_test += Reqs_this_period;
2722 Sleep_msec_this_test += Sleep_msec_this_period;
2724 /* compute per period targets */
 * Loads are expressed per second; scale by the period length in seconds.
2725 call_target_per_period = (int) (Child_call_load *
2726 ((float) Msec_per_period / (float) 1000));
2727 req_target_per_period = (int) (Child_req_load *
2728 ((float) Msec_per_period / (float) 1000));
2731 * The child() routine retrieved the Cur_time when deciding to call us.
2732 * Use Cur_time to compute the elapsed time since the last checkpoint
2733 * and the current checkpoint time (ie, elapsed time since test began)
2735 /* sfs_gettime(&Cur_time); */
2736 elapsed_time.sec = Cur_time.sec;
2737 elapsed_time.usec = Cur_time.usec;
2738 SUBTIME(elapsed_time, Starttime);
2740 msec_this_test = (elapsed_time.sec * 1000) + (elapsed_time.usec / 1000);
2741 current_msec = (Cur_time.sec * 1000) + (Cur_time.usec / 1000);
2742 msec_this_period = current_msec - Previous_chkpnt_msec;
 * Sanity clamp: accumulated sleep time can run ahead of wall-clock time
 * (e.g. coarse sleep granularity); never let it exceed elapsed time or
 * work_mspc below would go negative.
2744 if (msec_this_test < Sleep_msec_this_test) {
2745 if (DEBUG_CHILD_XPOINT) {
2746 (void) fprintf(stderr,
2747 "Accum. sleep time %d is msecs ahead of wall clock\n",
2748 Sleep_msec_this_test - msec_this_test);
2749 (void) fflush(stderr);
2751 Sleep_msec_this_test = msec_this_test;
2754 /* compute targets for test so far */
2755 call_target_this_test = (int) ((Child_call_load * (float) msec_this_test)
2757 req_target_this_test = (int) ((Child_req_load * (float) msec_this_test)
2760 /* save the old sleep rate */
2761 old_target_sleep_mspc = Target_sleep_mspc;
2763 /* Compute how long each request has taken on average. */
 * Average work time per request = (elapsed - slept) / requests issued.
 * With no requests yet, seed the estimate at half the per-request budget.
2764 if (Reqs_this_test != 0)
2765 work_mspc = ((float) (msec_this_test - Sleep_msec_this_test))
2766 / (float) Reqs_this_test;
2768 work_mspc = (1000.0 / (float) Child_req_load) / 2.0;
2771 * Compute the number of reqs needed in the next period
2772 * in order to just meet the reqstarget for the test when that period ends.
2773 * (Try to make up the entire shortage or overage in the next period.)
2774 * Beware that we might not need to make any reqs next period.
2776 reqs_needed_next_period = (req_target_this_test - Reqs_this_test)
2777 + req_target_per_period;
2779 if (reqs_needed_next_period <= 0) {
2780 /* if no reqs are needed, set the sleep time to the whole period */
2782 Target_sleep_mspc = Msec_per_period;
2784 /* decide how much time is available for each request */
 * Sleep per call = time-budget per request minus estimated work per
 * request; this is the feedback knob that paces the load generator.
2785 mspc = (float) (Msec_per_period) / (float) (reqs_needed_next_period);
2786 Target_sleep_mspc = (int) (mspc - work_mspc);
2789 /* Don't increase the target_sleep_mspc by much more than a factor of two,
2790 because doing so can lead to violent oscillations. */
 * The "+ 5" lets the sleep time grow even when the old value was 0.
2791 if (Target_sleep_mspc > 2*(old_target_sleep_mspc + 5)) {
2792 Target_sleep_mspc = 2*(old_target_sleep_mspc + 5);
 * Ceiling: sleeping a full period means zero calls next period — fatal
 * during the measurement interval, so abort the whole process group.
2795 if (Target_sleep_mspc >= Msec_per_period) {
2796 Target_sleep_mspc = Msec_per_period;
2797 if (DEBUG_CHILD_XPOINT) {
2798 (void) fprintf(stderr,
2799 "Child %d: 0 call, rqnd %d mspc %3.2f wmspc %3.2f time %d slp %d reqs %d\n",
2800 Child_num, reqs_needed_next_period, mspc, work_mspc,
2801 msec_this_test, Sleep_msec_this_test, Reqs_this_test);
2802 (void) fflush(stderr);
2804 if (Measurement_in_progress) {
2805 (void) fprintf(stderr,
2806 "Child %d: 0 calls during measurement interval\n",Child_num);
2807 (void) fprintf(stderr,
2808 "Child %d: probably unstable, try more processes.\n",Child_num);
2809 (void) generic_kill(0, SIGINT);
2810 (void) fflush(stderr);
 * Floor: behind target — stop sleeping entirely.
2814 if (Target_sleep_mspc <= 0) {
2815 Target_sleep_mspc = 0;
2816 if (DEBUG_CHILD_XPOINT) {
2817 (void) fprintf(stderr,
2818 "Child %d: 0 slp, rqnd %d mspc %3.2f wmspc %3.2f time %d slp %d reqs %d\n",
2819 Child_num, reqs_needed_next_period, mspc, work_mspc,
2820 msec_this_test, Sleep_msec_this_test, Reqs_this_test);
2821 (void) fflush(stderr);
 * Debug dump: target vs. actual per-period and per-test counters.
2825 if (DEBUG_CHILD_XPOINT) {
2826 (void) fprintf(stderr, "Child %d\n%s", Child_num,
2827 " msec_prd calls_prd reqs_prd calls_tot req_tot mspc_req\n");
2828 (void) fprintf(stderr, "target: %8d %9d %8d %9d %8d %6.2f\n",
2830 call_target_per_period, req_target_per_period,
2831 call_target_this_test, req_target_this_test,
2832 1000.0 / (float) req_target_per_period);
2833 (void) fprintf(stderr, "actual: %8d %9d %8d %9d %8d ->%6.2f\n",
2835 Calls_this_period, Reqs_this_period,
2836 Calls_this_test, Reqs_this_test,
2838 (void) fprintf(stderr,
2839 " old_sleep_mspc %5d new_sleep_mspc %5d\n\n",
2840 old_target_sleep_mspc, Target_sleep_mspc);
2844 * check for too many failed RPC calls
2845 * and print a warning if there are too many.
 * Thresholds: more than 100 new failures since the last warning, or
 * (once past 300 good calls) new failures exceeding 2% of good calls.
2847 if (((Ops[TOTAL].results.bad_calls - last_bad_calls) > 100) ||
2848 ((Ops[TOTAL].results.good_calls > 300) &&
2849 ((Ops[TOTAL].results.bad_calls - last_bad_calls) >
2850 Ops[TOTAL].results.good_calls/50))) {
2851 (void) fprintf(stderr,
2852 "%s: too many failed RPC calls - %d good %d bad\n",
2853 sfs_Myname, Ops[TOTAL].results.good_calls,
2854 Ops[TOTAL].results.bad_calls);
2855 last_bad_calls = Ops[TOTAL].results.bad_calls;
2858 /* reset the period counters */
2859 Calls_this_period = 0;
2860 Reqs_this_period = 0;
2861 Sleep_msec_this_period = 0;
2862 Previous_chkpnt_msec = current_msec;
2864 } /* check_call_rate */