1	!
2	! PROGRAM : STAT.BAS
3	! VERSION : 1.0
4	! DATE    : 20-Dec-1981
5	! AUTHOR  : A. Frank Hattyar
10	! Crocker National Bank &
	! 155 Fifth Street &
	! San Francisco, California 94103 &
	! &
	! This program is being provided to DECUS free of charge. Maintenance &
	! of the program will be done by the author on a time-permitting basis. &
	! If you have any bug fixes or enhancements to this program then please &
	! send them to the author at the above address. &
	! &
20	! Modifications &
100	! Description &
	! &
	! This program collects system statistics (ala DISPLAY) and &
	! writes them out to a disk file at every dump interval (currently &
	! every half hour). During every sample interval (2 secs) the &
	! free page count, modified page count and open file count are examined &
	! for maximum and minimum values, and subsequent average calculations. &
	! This should provide a rough estimate of the values of these counts. &
	! &
	! Two macro subroutines are provided in STATS.MAR. They are the &
	! following: &
	!	GETSTATS  - Gets the modified and free page counts &
	!	            and the open file count &
	! &
	!	DUMPSTATS - Calculates differences between current &
	!	            bucket values and values at last dump &
	! &
	! In addition, disk operation figures are also saved. There is enough &
	! buffer space for up to 10 drives. &
	! &
	! The program automatically closes out the current dump file on a &
	! Saturday, renames it, kicks off a batch job and reopens a new file &
	! for the new week. The undocumented RTL routine LIB$EXECUTE_CLI is &
	! used, however this will probably change in version 3.0 of VMS. &
	!
900	external integer function getstats, &
			dumpstats, &
			utl_day_of_week, &
			sys$getdev, &
			lib$execute_cli, &
			sys$hiber, &
			sys$schdwk, &
			sys$gettim, &
			sys$asctim &
\	map (statbuf) a$ = 32%, aves%(8%), a1$ = 344%, dev%(9%), &
		t0%, t1%, b$ = 52% &
\	map (statbuf) fill$ = 452%, s_time%(1%) &
\	map (temp) fill$ = 28%, ops%, fill$ = 100% &
\	map (temp) nulbuff$ = 132% &
\	map (dummy) sleep_interval1%, sleep_interval2%, &
		base_time1%, base_time2%, wake_time1%, wake_time2%, &
		factor1%, factor2%, m1%, m2% &
\	dim dev$(10%),devold%(10%) &
	! &
	! Set up our maps and arrays &
	! &
	! STATBUF is the 512-byte record written to the collection file; &
	! the second MAP overlays it so s_time%(0%)/s_time%(1%) read the &
	! two longwords at offset 452 of each record. &
	! NOTE(review): presumably those longwords are the 64-bit sample &
	! timestamp stored by DUMPSTATS -- confirm against STATS.MAR. &
	! TEMP overlays the SYS$GETDEV buffer so ops% picks up the longword &
	! at offset 28. NOTE(review): presumably the device operation count &
	! -- verify against the $GETDEV buffer layout. &
	! DUMMY places the quadword time cells adjacent in pairs (low/high &
	! longword) for the quadword arithmetic routines and system services. &
	! &
1000	on error goto 19000 &
\	dev$(0%) = "_DRA0:" &
\	dev$(1%) = "_DRA1:" &
\	dev$(2%) = "_DRB2:" &
\	dev$(3%) = "_DRB3:" &
\	dev$(4%) = "_DRB4:" &
\	dev$(5%) = "_DRB5:" &
\	devices% = 6%				! site-specific drive list &
\	collection_interval% = 2%		! seconds between samples &
\	collection_dump% = 1800% ! 1800% &
	! &
	! Describe the names and number of disk drives. Also set up &
	! the sampling rate and the interval for a dump to disk &
	! (1800 seconds = every half hour). &
	! &
1005	open "s_raw:statistic.dat" as file #1%, sequential fixed, &
		recordsize 512%, &
		map statbuf &
	! &
	! Open the collection file &
	! &
1007	get #1% &
\	m1% = s_time%(0%) &
\	m2% = s_time%(1%) &
\	goto 1007 &
	! &
	! Keep going until we get the last entry. &
	! This loop ends via the end-of-file error (err 11%), which the &
	! handler at 19000 fields and RESUMEs at 1008 -- at that point &
	! m1%/m2% still hold the timestamp of the last record read &
	! (or 0 if the file was empty). &
	! &
1008	last_sample_day$ = string$(3%,0%) &
\	if (m1% = 0%) and (m2% = 0%) then goto 1010 &
	else &
		sts% = utl_day_of_week(last_sample_day$,m1%,m2%) &
	! &
	! Figure out day of week of last entry so that we'll know &
	! when we need to close the current collection file and &
	! create a new one. &
	! &
1010	open "s_raw:statistic.dat" as file #1%, sequential fixed, &
		recordsize 512%, &
		map statbuf, &
		access append &
\	open "nl:" as file #2%, map temp, recordsize 132% &
\	open "nl:" as file #3%, map dummy, recordsize 50% &
	! &
	! Seed the minima high so the first sample always replaces them. &
	! NOTE(review): 2^31 does not fit a signed longword; presumably &
	! the conversion yields the intended huge sentinel here -- confirm &
	! the compiler's behavior (2%^31%-1% would be the safe value). &
	! &
\	free.min%, modified.min%, open.min% = 2^31 &
\	free.max%, modified.max%, open.max%, &
		free.ave%, modified.ave%, open.ave%, samples% = 0% &
\	free.pages%, modified.pages%, open.files% = 7% &
\	switch.on% = 0%		! suppress the PUT on the first (partial) dump &
\	factor1% = 10000000% ! 1 second &
\	factor2%, m2% = 0% &
\	m1% = collection_interval% &
\	call multquad(m1%, factor1%, sleep_interval1%) &
	! &
	! sleep_interval = collection_interval seconds expressed in the &
	! system's 100ns time units (10000000 units = 1 second). &
	! multquad/addquad come from STATS.MAR. &
	! &
\	a = time(0) &
\	a,a% = a/1800 &
\	s% = ((a%+1%)*1800 - (a*1800)) &
\	sleep s% &
	! &
	! Sleep until the next half-hour boundary: a*1800 recovers the &
	! seconds since midnight, (a%+1%)*1800 is the next boundary. &
	! NOTE(review): integer assignment rounds in VAX BASIC, so past &
	! the 15-minute mark a% rounds up and this waits until the &
	! half hour after next -- confirm whether that is intended. &
	! &
\	sts% = sys$gettim(base_time1%) &
\	goto 1030 &
	! &
	! Initialize all the variables and sleep until the next &
	! half hour, then take the first dump immediately at 1030. &
	! &
1020	counter% = counter% - collection_interval% &
\	goto 1030 if counter% <= 0%	! dump interval elapsed &
\	i% = getstats(free.pages% by ref, modified.pages% by ref, &
		open.files% by ref) &
\	free.max% = free.pages% if free.pages% > free.max% &
\	free.min% = free.pages% if free.pages% < free.min% &
\	free.ave% = free.ave% + free.pages% &
\	modified.max% = modified.pages% if modified.pages% > modified.max% &
\	modified.min% = modified.pages% if modified.pages% < modified.min% &
\	modified.ave% = modified.ave% + modified.pages% &
\	open.max% = open.files% if open.files% > open.max% &
\	open.min% = open.files% if open.files% < open.min% &
\	open.ave% = open.ave% + open.files% &
\	samples% = samples% + 1% &
\	goto 1040% &
	! &
	! Get the stats that are sampled periodically, then &
	! figure out the maximums, minimums and accumulate for &
	! the averages (the .ave% cells hold running sums; they are &
	! divided by samples% at dump time in 1030). &
	!
1030	call dumpstats(a$) &
	! &
	! Half-hourly dump: DUMPSTATS (STATS.MAR) fills the record buffer &
	! with the deltas since the last dump, then one final sample is &
	! folded into the min/max/average cells before they are stored. &
	! &
\	i% = getstats(free.pages% by ref, modified.pages% by ref, &
		open.files% by ref) &
\	free.max% = free.pages% if free.pages% > free.max% &
\	free.min% = free.pages% if free.pages% < free.min% &
\	free.ave% = free.ave% + free.pages% &
\	modified.max% = modified.pages% if modified.pages% > modified.max% &
\	modified.min% = modified.pages% if modified.pages% < modified.min% &
\	modified.ave% = modified.ave% + modified.pages% &
\	open.max% = open.files% if open.files% > open.max% &
\	open.min% = open.files% if open.files% < open.min% &
\	open.ave% = open.ave% + open.files% &
\	samples% = samples% + 1% &
	! &
	! aves%() layout: 0-2 free min/max/avg, 3-5 modified min/max/avg, &
	! 6-8 open-file min/max/avg (6 and 7 are stored out of order below, &
	! the slots are still correct). &
	! &
\	aves%(0%) = free.min% &
\	aves%(1%) = free.max% &
\	aves%(2%) = free.ave%/samples% &
\	aves%(3%) = modified.min% &
\	aves%(4%) = modified.max% &
\	aves%(5%) = modified.ave%/samples% &
\	aves%(7%) = open.max% &
\	aves%(6%) = open.min% &
\	aves%(8%) = open.ave%/samples% &
	! &
	! Reset the accumulators for the next dump interval. &
	! NOTE(review): 2^31 overflows a signed longword; see the matching &
	! initialization at 1010 -- confirm compiler behavior. &
	! &
\	free.min%, modified.min%, open.min% = 2^31 &
\	free.max%, modified.max%, open.max%, &
		free.ave%, modified.ave%, open.ave%, samples% = 0% &
\	gosub 1100 &
\	put #1% if switch.on% &
\	switch.on% = -1% &
\	counter% = collection_dump% &
	! &
	! Dump all the stats to the buffer, take care of all our &
	! mins, maxes and averages, chase after the disk stats and &
	! write out to the file. The very first dump after startup is &
	! NOT written (switch.on% is still 0) since it covers only a &
	! partial interval; every later dump is. The GOSUB runs the &
	! device-count routine at 1100, which falls through into the &
	! weekly-rotation check at 1110 and RETURNs at 1120. &
	! &
1040	call addquad(base_time1%, sleep_interval1%, wake_time1%) &
\	base_time1% = wake_time1% &
\	base_time2% = wake_time2% &
\	sts% = sys$schdwk(,,wake_time1%,) &
\	sts% = sys$hiber &
\	goto 1020 &
	! &
	! Die for a little while: advance the absolute wakeup time by one &
	! sample interval (quadword add over the adjacent MAPped longword &
	! pairs), schedule the wakeup, and hibernate until it fires. &
	! Using an absolute base time keeps the sample clock from drifting. &
	! &
1100	for i% = 0% to devices% - 1% &
\	arg$ = dev$(i%) &
\	nulbuff$ = "" &
\	sts% = sys$getdev(arg$,,,temp%,nulbuff$) &
\	count% = ops% &
\	devnew% = ops% &
\	dev%(i%) = devnew% - devold%(i%) &
\	devold%(i%) = devnew% &
\	next i% &
	! &
	! get device access counts for all of our disks: nulbuff$ is the &
	! MAPped $GETDEV output buffer, ops% overlays the operation count &
	! within it; dev%(i%) receives the operations done since the last &
	! dump. (count% is assigned but never used afterwards.) &
	! Execution falls through to 1110 below. &
	!
1110	sts% = sys$gettim(t0%) &
\	current_day$ = string$(3%,0%) &
\	sts% = utl_day_of_week(current_day$,t0%,t1%) &
	! &
	! Weekly rollover, still inside the GOSUB from 1030: on the first &
	! dump that lands on a Saturday (i.e. last_sample_day$ is neither &
	! already "SAT" nor blank from an empty startup file), close the &
	! week's file, rename it, queue the weekly processing batch job &
	! and start a fresh collection file. &
	! &
\	if current_day$ = "SAT" &
	then &
		goto 1120 if last_sample_day$ = "SAT" &
\		goto 1120 if edit$(last_sample_day$,-1%) = "" &
\		close #1% &
\		command_1$ = "$ RENAME S_RAW:STATISTIC.DAT S_RAW:STATS.DAT" &
\		command_2$ = "$ SUBMIT/QUE=SYS$FAST S_HOME:STATWEEK" &
\		command_3$ = "$ LOGO" &
\		sts% = lib$execute_cli(command_1$, command_2$, command_3$) &
	! NOTE(review): LIB$EXECUTE_CLI is undocumented (see header); &
	! expected to change in VMS V3.0 -- verify before rebuilding. &
\		open "s_raw:statistic.dat" for output as file #1%, &
			sequential fixed, &
			recordsize 512%, &
			map statbuf &
	! &
	! If we are at the end of the sampling period then we &
	! close and rename the file, start the processing batch &
	! job, open a brand new file and continue on. &
	! &
1120	last_sample_day$ = current_day$ &
\	return &
	! &
	! Remember the sample day and go on (returns to the GOSUB at 1030). &
	! &
19000	if err = 11% and erl = 1007% then resume 1008% &
	else	print err,erl,ert$(err) &
\		stop &
	! &
	! Official certified error handler: end-of-file (err 11%) while &
	! scanning the existing file at 1007 is the expected way out of &
	! that loop and resumes at 1008; anything else is reported and &
	! stops the program. &
	! &
32760	close #1%, #2%, #3% &
	! &
	! Wrap it up. &
	! NOTE(review): the main loop (1020/1040) never branches here; &
	! this line is only reached by CONTinuing after a STOP -- confirm &
	! that is the intended shutdown path. &
	! &
32767	end