[lug] Airing the dirty code

Bear Giles bgiles at coyotesong.com
Fri Mar 15 02:55:36 MST 2002


Since I was incredibly bored, I did a quick implementation of an
RR (round-robin) simulation using a heap-based priority queue to
manage the test data and the RR schedule.  This isn't an exact match
for the problem (which apparently provided existing code), but it's
not far off.

It took about 3 hours, and I would normally document it a bit
better but I too want to get to bed.... :-)

All values:

q  50, o  0, t 13873800, idle   11632, delay  110, trn   74273
q 100, o  0, t 13874100, idle   24282, delay  224, trn   74837
q 250, o  0, t 13875500, idle   61782, delay  584, trn   76375
q 500, o  0, t 13876500, idle  126782, delay 1227, trn   79341
q  50, o  5, t 13920000, idle   11237, delay  392, trn  231305
q 100, o  5, t 13895900, idle   22552, delay  411, trn  125671
q 250, o  5, t 13884000, idle   62132, delay  745, trn   94075
q 500, o  5, t 13882000, idle  121127, delay 1386, trn   87545
q  50, o 10, t 15338800, idle   10002, delay 2498, trn 1535829
q 100, o 10, t 13920100, idle   22532, delay  795, trn  233061
q 250, o 10, t 13892750, idle   61082, delay  944, trn  115062
q 500, o 10, t 13886000, idle  124362, delay 1571, trn   97248
q  50, o 15, t 17516550, idle    8862, delay 4595, trn 3292906
q 100, o 15, t 14457100, idle   20707, delay 2826, trn  817313
q 250, o 15, t 13902000, idle   56212, delay 1176, trn  141954
q 500, o 15, t 13890500, idle  123532, delay 1771, trn  107524
q  50, o 20, t 20421000, idle    7472, delay 6501, trn 5615708
q 100, o 20, t 15350700, idle   19642, delay 5015, trn 1541350
q 250, o 20, t 13911250, idle   55152, delay 1509, trn  178434
q 500, o 20, t 13895000, idle  120602, delay 1964, trn  118454
q  50, o 25, t 24490450, idle    6007, delay 8214, trn 8818324
q 100, o 25, t 16367800, idle   19307, delay 7119, trn 2363180
q 250, o 25, t 13921250, idle   57182, delay 2055, trn  239337
q 500, o 25, t 13899500, idle  118882, delay 2188, trn  131677

q     = quanta (time slice), in ms
o     = scheduling overhead per quanta, in ms
t     = time the simulation ended, in ms
idle  = total unused time left over in each job's final quanta, in ms
delay = average wait from a job becoming ready to its first run, in ms
trn   = average time from a job's first run to its finish, in ms

The time to compute all of this on my sub-800 MHz Linux box:

real  0m57.046s
user  0m55.730s
sys   0m0.070s

Note: that's the total time for the script that ran *all* of the
test cases.
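
A couple of nested shell loops along these lines will regenerate the
table above (a sketch, not necessarily the exact script; it assumes
the binary is ./sim1 and the data file is lab07Data.txt):

#!/bin/sh
# run every overhead/quanta combination from the table above
for o in 0 5 10 15 20 25; do
    for q in 50 100 250 500; do
        ./sim1 -q $q -o $o lab07Data.txt
    done
done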

The program itself can easily be tested with a few simple command
line options, e.g.,

(test = processes remaining in the test queue, pending = number of
active processes, pid = the process that just ran; look for RR
behavior wherever pending > 1.  Note that the "job N finishes" lines
carry the exact completion time, while the trace line for that same
quanta carries the time the quanta started, so the finish lines can
appear slightly out of order.)

$ ./sim1 -v -v -q 750 -o 50 -n 10

   39050: job 1 starts
   39050: test 9, pending 1, pid 1
   39800: test 9, pending 1, pid 1
   40550: test 9, pending 1, pid 1
   41300: test 9, pending 1, pid 1
   42050: test 9, pending 1, pid 1
   42800: test 9, pending 1, pid 1
   43550: test 9, pending 1, pid 1
   44300: test 9, pending 1, pid 1
   45050: test 9, pending 1, pid 1
   45800: test 9, pending 1, pid 1
   46550: test 9, pending 1, pid 1
   47300: test 9, pending 1, pid 1
   48050: test 9, pending 1, pid 1
   48800: test 9, pending 1, pid 1
   49550: test 9, pending 1, pid 1
   50300: test 9, pending 1, pid 1
   51050: test 9, pending 1, pid 1
   51800: test 9, pending 1, pid 1
   52550: test 9, pending 1, pid 1
   53300: test 9, pending 1, pid 1
   54050: test 9, pending 1, pid 1
   54800: test 9, pending 1, pid 1
   55550: test 9, pending 1, pid 1
   56300: test 9, pending 1, pid 1
   57050: test 9, pending 1, pid 1
   57800: test 9, pending 1, pid 1
   58550: test 9, pending 1, pid 1
   59300: test 9, pending 1, pid 1
   60620: job 1 finishes
   60050: test 9, pending 0, pid 1
   76050: job 2 starts
   76050: test 8, pending 1, pid 2
   76800: test 8, pending 1, pid 2
   77550: test 8, pending 1, pid 2
   78300: test 8, pending 1, pid 2
   79050: test 8, pending 1, pid 2
   79800: test 8, pending 1, pid 2
   80550: test 8, pending 1, pid 2
   81300: test 8, pending 1, pid 2
   82050: test 8, pending 1, pid 2
   82800: test 8, pending 1, pid 2
   83550: test 8, pending 1, pid 2
   84300: test 8, pending 1, pid 2
   85050: test 8, pending 1, pid 2
   85800: test 8, pending 1, pid 2
   86550: test 8, pending 1, pid 2
   87300: test 8, pending 1, pid 2
   88050: test 8, pending 1, pid 2
   88800: test 8, pending 1, pid 2
   89550: test 8, pending 1, pid 2
   90300: test 8, pending 1, pid 2
   91050: test 8, pending 1, pid 2
   91800: test 8, pending 1, pid 2
   92550: test 8, pending 1, pid 2
   93300: test 8, pending 1, pid 2
   94050: test 8, pending 1, pid 2
   94800: test 8, pending 1, pid 2
   95550: test 8, pending 1, pid 2
   96300: test 8, pending 1, pid 2
   97050: test 8, pending 1, pid 2
   97800: test 8, pending 1, pid 2
   98550: test 8, pending 1, pid 2
   99300: test 8, pending 1, pid 2
  100050: test 8, pending 1, pid 2
  100800: test 8, pending 1, pid 2
  101550: test 8, pending 1, pid 2
  102300: test 8, pending 1, pid 2
  103050: test 8, pending 1, pid 2
  103800: test 8, pending 1, pid 2
  104550: test 8, pending 1, pid 2
  105300: test 8, pending 1, pid 2
  106050: test 8, pending 1, pid 2
  106800: test 8, pending 1, pid 2
  107550: test 8, pending 1, pid 2
  108300: test 8, pending 1, pid 2
  109050: test 8, pending 1, pid 2
  109800: test 8, pending 1, pid 2
  110550: test 8, pending 1, pid 2
  111300: test 8, pending 1, pid 2
  112050: test 8, pending 1, pid 2
  112800: test 8, pending 1, pid 2
  113550: test 8, pending 1, pid 2
  114300: test 8, pending 1, pid 2
  115387: job 2 finishes
  115050: test 8, pending 0, pid 2
  117050: job 3 starts
  117050: test 7, pending 1, pid 3
  117800: test 7, pending 1, pid 3
  118550: test 7, pending 1, pid 3
  119300: test 7, pending 1, pid 3
  120050: test 7, pending 1, pid 3
  120800: test 7, pending 1, pid 3
  121550: test 7, pending 1, pid 3
  122300: test 7, pending 1, pid 3
  123050: test 7, pending 1, pid 3
  123800: test 7, pending 1, pid 3
  124550: test 7, pending 1, pid 3
  125300: test 7, pending 1, pid 3
  126050: test 7, pending 1, pid 3
  126800: test 7, pending 1, pid 3
  127550: test 7, pending 1, pid 3
  128300: test 7, pending 1, pid 3
  129050: test 7, pending 1, pid 3
  130000: job 3 finishes
  129800: test 7, pending 0, pid 3
  138050: job 4 starts
  138050: test 6, pending 1, pid 4
  138800: test 6, pending 1, pid 4
  139550: test 6, pending 1, pid 4
  140300: test 6, pending 1, pid 4
  141050: test 6, pending 1, pid 4
  141800: test 6, pending 1, pid 4
  142550: test 6, pending 1, pid 4
  143300: test 6, pending 1, pid 4
  144050: test 6, pending 1, pid 4
  144800: test 6, pending 1, pid 4
  145550: test 6, pending 1, pid 4
  146300: test 6, pending 1, pid 4
  147050: test 6, pending 1, pid 4
  147800: test 6, pending 1, pid 4
  148550: test 6, pending 1, pid 4
  149300: test 6, pending 1, pid 4
  150050: test 6, pending 1, pid 4
  150800: test 6, pending 1, pid 4
  151550: test 6, pending 1, pid 4
  152300: test 6, pending 1, pid 4
  153050: test 6, pending 1, pid 4
  153800: test 6, pending 1, pid 4
  154550: test 6, pending 1, pid 4
  155300: test 6, pending 1, pid 4
  156050: test 6, pending 1, pid 4
  156800: test 6, pending 1, pid 4
  157550: test 5, pending 2, pid 4
  158300: job 5 starts
  158300: test 5, pending 2, pid 5
  159050: test 5, pending 2, pid 4
  159800: test 5, pending 2, pid 5
  160550: test 5, pending 2, pid 4
  161300: test 5, pending 2, pid 5
  162050: test 5, pending 2, pid 4
  162800: test 5, pending 2, pid 5
  163550: test 5, pending 2, pid 4
  164300: test 5, pending 2, pid 5
  165050: test 5, pending 2, pid 4
  165800: test 5, pending 2, pid 5
  166550: test 5, pending 2, pid 4
  167300: test 5, pending 2, pid 5
  168050: test 5, pending 2, pid 4
  168800: test 5, pending 2, pid 5
  169550: test 5, pending 2, pid 4
  170300: test 5, pending 2, pid 5
  171050: test 5, pending 2, pid 4
  171800: test 5, pending 2, pid 5
  172550: test 5, pending 2, pid 4
  173300: test 5, pending 2, pid 5
  174050: test 5, pending 2, pid 4
  174800: test 5, pending 2, pid 5
  175550: test 5, pending 2, pid 4
  176300: test 5, pending 2, pid 5
  177050: test 5, pending 2, pid 4
  177800: test 5, pending 2, pid 5
  178550: test 5, pending 2, pid 4
  179300: test 5, pending 2, pid 5
  180050: test 5, pending 2, pid 4
  180800: test 5, pending 2, pid 5
  181550: test 5, pending 2, pid 4
  182300: test 5, pending 2, pid 5
  183050: test 4, pending 3, pid 4
  183800: test 4, pending 3, pid 5
  184550: job 6 starts
  184550: test 4, pending 3, pid 6
  185300: test 4, pending 3, pid 4
  186050: test 4, pending 3, pid 5
  186800: test 4, pending 3, pid 6
  187550: test 4, pending 3, pid 4
  188300: test 4, pending 3, pid 5
  189050: test 4, pending 3, pid 6
  189800: test 4, pending 3, pid 4
  190550: test 4, pending 3, pid 5
  191300: test 4, pending 3, pid 6
  192050: test 4, pending 3, pid 4
  192800: test 4, pending 3, pid 5
  193550: test 4, pending 3, pid 6
  194300: test 4, pending 3, pid 4
  195050: test 4, pending 3, pid 5
  195800: test 4, pending 3, pid 6
  196550: test 4, pending 3, pid 4
  197300: test 4, pending 3, pid 5
  198050: test 4, pending 3, pid 6
  199298: job 4 finishes
  198800: test 4, pending 2, pid 4
  199550: test 4, pending 2, pid 5
  200300: test 4, pending 2, pid 6
  201050: test 4, pending 2, pid 5
  201800: test 4, pending 2, pid 6
  202550: test 4, pending 2, pid 5
  203300: test 4, pending 2, pid 6
  204050: test 4, pending 2, pid 5
  204800: test 4, pending 2, pid 6
  205550: test 3, pending 3, pid 5
  206300: test 3, pending 3, pid 6
  207050: job 7 starts
  207050: test 3, pending 3, pid 7
  207800: test 3, pending 3, pid 5
  208550: test 3, pending 3, pid 6
  209300: test 3, pending 3, pid 7
  210050: test 3, pending 3, pid 5
  210800: test 3, pending 3, pid 6
  211550: test 3, pending 3, pid 7
  212300: test 3, pending 3, pid 5
  213050: test 3, pending 3, pid 6
  213800: test 3, pending 3, pid 7
  214550: test 3, pending 3, pid 5
  215300: test 3, pending 3, pid 6
  216050: test 3, pending 3, pid 7
  216800: test 3, pending 3, pid 5
  217550: test 3, pending 3, pid 6
  218300: test 3, pending 3, pid 7
  219050: test 3, pending 3, pid 5
  219800: test 3, pending 3, pid 6
  220550: test 3, pending 3, pid 7
  221300: test 3, pending 3, pid 5
  222050: test 3, pending 3, pid 6
  222800: test 3, pending 3, pid 7
  223550: test 3, pending 3, pid 5
  224300: test 3, pending 3, pid 6
  225050: test 3, pending 3, pid 7
  225800: test 3, pending 3, pid 5
  226550: test 3, pending 3, pid 6
  227300: test 3, pending 3, pid 7
  228163: job 5 finishes
  228050: test 3, pending 2, pid 5
  228800: test 3, pending 2, pid 6
  229550: test 3, pending 2, pid 7
  230300: test 3, pending 2, pid 6
  231050: test 3, pending 2, pid 7
  231800: test 3, pending 2, pid 6
  232550: test 3, pending 2, pid 7
  233300: test 3, pending 2, pid 6
  234050: test 3, pending 2, pid 7
  234800: test 3, pending 2, pid 6
  235550: test 3, pending 2, pid 7
  236300: test 3, pending 2, pid 6
  237050: test 3, pending 2, pid 7
  237800: test 3, pending 2, pid 6
  238550: test 3, pending 2, pid 7
  239300: test 3, pending 2, pid 6
  240050: test 3, pending 2, pid 7
  240800: test 3, pending 2, pid 6
  241550: test 3, pending 2, pid 7
  242300: test 3, pending 2, pid 6
  243050: test 3, pending 2, pid 7
  243800: test 3, pending 2, pid 6
  244550: test 3, pending 2, pid 7
  245300: test 3, pending 2, pid 6
  246050: test 3, pending 2, pid 7
  246800: test 3, pending 2, pid 6
  247550: test 3, pending 2, pid 7
  248300: test 2, pending 3, pid 6
  249050: test 2, pending 3, pid 7
  249800: job 8 starts
  249800: test 2, pending 3, pid 8
  250550: test 2, pending 3, pid 6
  251300: test 2, pending 3, pid 7
  252050: test 2, pending 3, pid 8
  252800: test 2, pending 3, pid 6
  253550: test 2, pending 3, pid 7
  254300: test 2, pending 3, pid 8
  255050: test 2, pending 3, pid 6
  255800: test 2, pending 3, pid 7
  256550: test 2, pending 3, pid 8
  257300: test 2, pending 3, pid 6
  258050: test 2, pending 3, pid 7
  258800: test 2, pending 3, pid 8
  259550: test 2, pending 3, pid 6
  260300: test 2, pending 3, pid 7
  261050: test 2, pending 3, pid 8
  261800: test 2, pending 3, pid 6
  262550: test 2, pending 3, pid 7
  263300: test 2, pending 3, pid 8
  264050: test 2, pending 3, pid 6
  264800: test 2, pending 3, pid 7
  265550: test 2, pending 3, pid 8
  266684: job 6 finishes
  266300: test 2, pending 2, pid 6
  267050: test 2, pending 2, pid 7
  267800: test 2, pending 2, pid 8
  268550: test 2, pending 2, pid 7
  269300: test 2, pending 2, pid 8
  270050: test 2, pending 2, pid 7
  270800: test 2, pending 2, pid 8
  271550: test 2, pending 2, pid 7
  272300: test 2, pending 2, pid 8
  273600: job 7 finishes
  273050: test 2, pending 1, pid 7
  273800: test 2, pending 1, pid 8
  274550: test 2, pending 1, pid 8
  275300: test 2, pending 1, pid 8
  276050: test 2, pending 1, pid 8
  276800: test 2, pending 1, pid 8
  277550: test 2, pending 1, pid 8
  278300: test 2, pending 1, pid 8
  279050: test 2, pending 1, pid 8
  279800: test 2, pending 1, pid 8
  280550: test 1, pending 2, pid 8
  281300: job 9 starts
  281300: test 1, pending 2, pid 9
  282050: test 1, pending 2, pid 8
  282800: test 1, pending 2, pid 9
  283550: test 1, pending 2, pid 8
  284300: test 1, pending 2, pid 9
  285050: test 1, pending 2, pid 8
  285800: test 1, pending 2, pid 9
  286550: test 1, pending 2, pid 8
  287300: test 1, pending 2, pid 9
  288050: test 1, pending 2, pid 8
  288800: test 1, pending 2, pid 9
  289550: test 1, pending 2, pid 8
  290300: test 1, pending 2, pid 9
  291050: test 1, pending 2, pid 8
  291800: test 1, pending 2, pid 9
  292550: test 1, pending 2, pid 8
  293300: test 1, pending 2, pid 9
  294050: test 0, pending 3, pid 8
  294800: test 0, pending 3, pid 9
  295550: job 10 starts
  295550: test 0, pending 3, pid 10
  296300: test 0, pending 3, pid 8
  297050: test 0, pending 3, pid 9
  297800: test 0, pending 3, pid 10
  298550: test 0, pending 3, pid 8
  299300: test 0, pending 3, pid 9
  300050: test 0, pending 3, pid 10
  300800: test 0, pending 3, pid 8
  301550: test 0, pending 3, pid 9
  302300: test 0, pending 3, pid 10
  303050: test 0, pending 3, pid 8
  303800: test 0, pending 3, pid 9
  304550: test 0, pending 3, pid 10
  305300: test 0, pending 3, pid 8
  306050: test 0, pending 3, pid 9
  306800: test 0, pending 3, pid 10
  307550: test 0, pending 3, pid 8
  308300: test 0, pending 3, pid 9
  309050: test 0, pending 3, pid 10
  309800: test 0, pending 3, pid 8
  310550: test 0, pending 3, pid 9
  311300: test 0, pending 3, pid 10
  312050: test 0, pending 3, pid 8
  312800: test 0, pending 3, pid 9
  313550: test 0, pending 3, pid 10
  314300: test 0, pending 3, pid 8
  315050: test 0, pending 3, pid 9
  315800: test 0, pending 3, pid 10
  316550: test 0, pending 3, pid 8
  317300: test 0, pending 3, pid 9
  318050: test 0, pending 3, pid 10
  318800: test 0, pending 3, pid 8
  319550: test 0, pending 3, pid 9
  320300: test 0, pending 3, pid 10
  321050: test 0, pending 3, pid 8
  321800: test 0, pending 3, pid 9
  322550: test 0, pending 3, pid 10
  323300: test 0, pending 3, pid 8
  324050: test 0, pending 3, pid 9
  324800: test 0, pending 3, pid 10
  325550: test 0, pending 3, pid 8
  326300: test 0, pending 3, pid 9
  327050: test 0, pending 3, pid 10
  327800: test 0, pending 3, pid 8
  328550: test 0, pending 3, pid 9
  329300: test 0, pending 3, pid 10
  330050: test 0, pending 3, pid 8
  330800: test 0, pending 3, pid 9
  331550: test 0, pending 3, pid 10
  332300: test 0, pending 3, pid 8
  333050: test 0, pending 3, pid 9
  333800: test 0, pending 3, pid 10
  334550: test 0, pending 3, pid 8
  335300: test 0, pending 3, pid 9
  336050: test 0, pending 3, pid 10
  336800: test 0, pending 3, pid 8
  337550: test 0, pending 3, pid 9
  338300: test 0, pending 3, pid 10
  339050: test 0, pending 3, pid 8
  339800: test 0, pending 3, pid 9
  340550: test 0, pending 3, pid 10
  341300: test 0, pending 3, pid 8
  342050: test 0, pending 3, pid 9
  342800: test 0, pending 3, pid 10
  343550: test 0, pending 3, pid 8
  344300: test 0, pending 3, pid 9
  345050: test 0, pending 3, pid 10
  345800: test 0, pending 3, pid 8
  346550: test 0, pending 3, pid 9
  347300: test 0, pending 3, pid 10
  348050: test 0, pending 3, pid 8
  348800: test 0, pending 3, pid 9
  349550: test 0, pending 3, pid 10
  350300: test 0, pending 3, pid 8
  351050: test 0, pending 3, pid 9
  351800: test 0, pending 3, pid 10
  352550: test 0, pending 3, pid 8
  353300: test 0, pending 3, pid 9
  354050: test 0, pending 3, pid 10
  354800: test 0, pending 3, pid 8
  355550: test 0, pending 3, pid 9
  356300: test 0, pending 3, pid 10
  357050: test 0, pending 3, pid 8
  357800: test 0, pending 3, pid 9
  358550: test 0, pending 3, pid 10
  359564: job 8 finishes
  359300: test 0, pending 2, pid 8
  360050: test 0, pending 2, pid 9
  360800: test 0, pending 2, pid 10
  361550: test 0, pending 2, pid 9
  362300: test 0, pending 2, pid 10
  363050: test 0, pending 2, pid 9
  363800: test 0, pending 2, pid 10
  364550: test 0, pending 2, pid 9
  365300: test 0, pending 2, pid 10
  366050: test 0, pending 2, pid 9
  366800: test 0, pending 2, pid 10
  367550: test 0, pending 2, pid 9
  368300: test 0, pending 2, pid 10
  369050: test 0, pending 2, pid 9
  369800: test 0, pending 2, pid 10
  370550: test 0, pending 2, pid 9
  371300: test 0, pending 2, pid 10
  372050: test 0, pending 2, pid 9
  372800: test 0, pending 2, pid 10
  374060: job 9 finishes
  373550: test 0, pending 1, pid 9
  374300: test 0, pending 1, pid 10
  375050: test 0, pending 1, pid 10
  375800: test 0, pending 1, pid 10
  376736: job 10 finishes
  376550: test 0, pending 0, pid 10

q 750, o 50, t   377250, idle    3388, delay  975, trn   63736

Source code 
------------------------------------------------------------
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <limits.h>
#include <string.h>
#include <errno.h>
#include <math.h>

// since this is a simple simulation, we'll just use manifest
// constants instead of bothering with a real database.

// maximum number of threads the simulation will process
#define MAX_THREADS		1000

// maximum events allowed at any time
#define MAX_EVENTS		2000

// IMPORTANT: all times are measured in milliseconds, not seconds.

struct Thread {
	long pid;		// arbitrary process id, unique identifier
	long ready;  	// time that the job became ready
	long start;  	// time that the job actually started
	long finish;  	// time that the job ended
	long remaining;	// execution time remaining
};

struct Event {
	long time;
	struct Thread *t;
};

struct PQ {
	int (*f)(const struct Event *, const struct Event *);
	int size;
	struct Event events[MAX_EVENTS];
};

struct State {
	// priority queue of pending tasks
	struct PQ test;

	// round-robin scheduling queue
	struct PQ pending;

	// current time (in milliseconds)
	long time;

	// scheduling information
	long quanta;
	long overhead;

	// statistics
	long idle;

	// miscellaneous
	int verbose;
};

// global variables

// test database
struct Thread threads[MAX_THREADS];
int tcount = 0;

#define PARENT(i)	((i)/2)
#define LEFT(i)		(2*(i))
#define RIGHT(i)	(2*(i)+1)

/*
 *	Priority queues
 */
int
cmpEvent(const struct Event *e1, const struct Event *e2)
{
	if (e1->time < e2->time) return -1;
	if (e1->time == e2->time) return 0;
	return 1;
}

/*
 *	Initialize a priority queue
 */
void
pqInit (struct PQ *pq,
	int (*f)(const struct Event *, const struct Event *))
{
	int i;

	pq->size = 0;
	pq->f = f;
	for (i = 0; i < sizeof (pq->events) / sizeof (pq->events[0]); i++) {
		pq->events[i].time = 0;
		pq->events[i].t = NULL;
	}
}

/*
 *	Return number of elements remaining
 */
int
pqSize (struct PQ *pq)
{
	return (pq->size);
}

/*
 *	Insert item into priority queue.  CLR 7.5
 */
void
pqInsert (struct PQ *pq, struct Event *e)
{
	int i;

	assert (pq->size < MAX_EVENTS);

	i = ++pq->size;
	while (i > 1 && pq->f(&pq->events[PARENT(i)], e) > 0) {
		memcpy(&pq->events[i], &pq->events[PARENT(i)], sizeof (struct Event));
		i = PARENT(i);
	}
	memcpy(&pq->events[i], e, sizeof (struct Event));
}

/*
 *	Restore heap property
 */
void
pqHeapify (struct PQ *pq, int i)
{
	int l, r, x;
	struct Event e;

	l = LEFT(i);
	r = RIGHT(i);

	x = (l <= pq->size && pq->f(&pq->events[l], &pq->events[i]) < 0) ? l : i;
	if (r <= pq->size && pq->f(&pq->events[r], &pq->events[x]) < 0) {
		x = r;
	}

	if (x != i) {
		memcpy(&e, &pq->events[i], sizeof (struct Event));
		memcpy(&pq->events[i], &pq->events[x], sizeof (struct Event));
		memcpy(&pq->events[x], &e, sizeof (struct Event));
		pqHeapify(pq, x);
	}
}

/*
 *	Get next item out of priority queue.  CLR 7.5
 */
int
pqNext (struct PQ *pq, struct Event *e)
{
	assert (pq->size >= 1);
	if (pq->size <= 0) {
		return -1;
	}

	memcpy(e, &pq->events[1], sizeof (struct Event));
	memcpy(&pq->events[1], &pq->events[pq->size], sizeof (struct Event));
	pq->size--;

	pqHeapify(pq, 1);

	return 0;
}

/*
 *	Peek at next item in priority queue.
 */
int
pqPeek (struct PQ *pq, struct Event *e)
{
	assert (pq->size >= 1);
	if (pq->size <= 0) {
		return -1;
	}

	memcpy(e, &pq->events[1], sizeof (struct Event));

	return 0;
}

/*
 *	Verify the heap invariant
 */
int
pqCheck (struct PQ *pq)
{
	int i;

	if (pq->size == 1) {
		return 1;
	}

	for (i = 2; i <= pq->size; i++) {
		if (pq->f(&pq->events[PARENT(i)], &pq->events[i]) > 0) {
			return 0;
		}
	}

	return 1;
}

/**************************************************************/

/*
 *	Load test data into priority queue.
 */
int
loadTestData (struct PQ *test, const char *filename, int mcount)
{
	FILE *fp;
	char buffer[1024];
	struct Thread *t;
	struct Event e;
	long ready;
	float duration;
	int pid = 1;

	assert (filename != NULL);

	if (filename == NULL) {
		fprintf(stderr, "filename must be specified\n");
		return -1;
	}

	if (mcount <= 0) {
		fprintf(stderr, "count must be a positive number\n");
		return -1;
	}

	if (mcount > MAX_THREADS) {
		mcount = MAX_THREADS;
	}

	if ((fp = fopen(filename, "r")) == NULL) {
		fprintf(stderr, "%s: %s\n", filename, strerror(errno));
		return -1;
	}

	tcount = 0;
	while (tcount < mcount && fgets(buffer, sizeof buffer, fp) != NULL) {
		// each line: time the job becomes ready (seconds) and its
		// CPU time (seconds); skip any line that doesn't parse
		if (sscanf(buffer, "%ld %f", &ready, &duration) != 2) {
			continue;
		}

		t = &threads[tcount++];
		t->pid = pid++;
		t->ready = 1000 * ready;
		t->start = 0;
		t->finish = 0;
		t->remaining = (long) ceil(1000.0 * duration);

		e.time = t->ready;
		e.t = t;
		pqInsert(test, &e);
	}

	fclose(fp);

	return 0;
}

/**************************************************************/

int
kernelInit (struct State *state)
{
	pqInit (&state->test, cmpEvent);
	pqInit (&state->pending, cmpEvent);

	state->time = 0;
	state->idle = 0;

	if (state->quanta < state->overhead) {
		fprintf(stderr, "overhead exceeds quanta!\n");
		return -1;
	}
	else if (state->quanta == state->overhead) {
		fprintf(stderr, 
			"overhead and quanta are identical!\n");
		return -1;
	}

	state->quanta -= state->overhead;
	return 0;
}

void
kernelRun (struct State *state)
{
	struct Event e;
	int vtime = 0;  // virtual time used to handle RR scheduling
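	// Each time a job is inserted (or re-inserted) into the pending
	// queue, its event is keyed on this ever-increasing counter rather
	// than on real time, so the min-heap hands jobs back in strict
	// FIFO order -- which gives round-robin behavior.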

	while (pqSize(&state->test) > 0 || pqSize(&state->pending) > 0) {

		assert(pqCheck(&state->test));
		assert(pqCheck(&state->pending));

		// if pending queue is empty, skip forward in time
		// until next event.
		if (pqSize(&state->pending) == 0) {
			pqNext(&state->test, &e);
			state->time = e.time;
			e.time = vtime++;
			pqInsert(&state->pending, &e);
		}

		// copy any newborn events into pending queue
		while (pqSize(&state->test) > 0) {
			pqPeek(&state->test, &e);
			if (e.time <= state->time) {
				pqNext(&state->test, &e);
				e.time = vtime++;
				pqInsert(&state->pending, &e);
			}
			else {
				break;
			}
		}

		state->time += state->overhead;

		// take next event out of the pending queue.
		pqNext(&state->pending, &e);

		assert (e.t->remaining > 0);

		// if first time through, record start time
		if (e.t->start == 0) {
			e.t->start = state->time;
			if (state->verbose >= 1) {
				printf ("%8ld: job %ld starts\n", e.t->start, e.t->pid);
			}
		}

		// if the program will keep running, decrement its remaining
		// time and put it back into the pending queue
		if (e.t->remaining > state->quanta) {
			e.t->remaining -= state->quanta;

			e.time = vtime++;
			pqInsert(&state->pending, &e);
		}

		// otherwise the job will terminate.  Lotsa work.
		else {
			e.t->finish = state->time + e.t->remaining;
			state->idle += state->quanta - e.t->remaining;
			e.t->remaining = 0;

			if (state->verbose >= 1) {
				printf ("%8ld: job %ld finishes\n", e.t->finish, e.t->pid);
			}
		}

		// print debugging information
		if (state->verbose >= 2) {
			printf("%8ld: test %d, pending %d, pid %ld\n",
				state->time, pqSize(&state->test), pqSize(&state->pending),
				e.t->pid);
		}

		state->time += state->quanta;
	}
}

void
kernelFinish (struct State *state)
{
	// nothing to tear down in this simple simulation
}

void
showStats (struct State *state)
{
	int i;
	long long delay;
	long long turn;

	if (tcount == 0) {
		fprintf(stderr, "no jobs were loaded\n");
		return;
	}

	delay = 0;
	turn = 0;
	for (i = 0; i < tcount; i++) {
		assert (threads[i].ready != 0);
		assert (threads[i].start != 0);
		assert (threads[i].finish != 0);
		assert (threads[i].ready <= threads[i].start);
		assert (threads[i].start <= threads[i].finish);

		delay += threads[i].start - threads[i].ready;
		turn += threads[i].finish - threads[i].start;
	}

	printf("q %3ld, o %2ld, t %8ld, idle %7ld, delay %4lld, trn %7lld\n",
		state->quanta + state->overhead, state->overhead, state->time, 
		state->idle, delay / tcount, turn / tcount);
}

int
usage (const char *progname)
{
	fprintf(stderr,
		"usage: %s [-q #] [-o #] [-n #] [-v] [filename]\n", progname);
	fprintf(stderr, 
		"-q #: time quantum.  Recommended values: 50, 100, 250, 500\n");
	fprintf(stderr, 
		"-o #: overhead.  Recommended values: 0, 5, 10, 15, 20, 25\n");
	fprintf(stderr, 
		"-n #: count.  Max. number of threads to execute\n");
	fprintf(stderr, 
		"-v: verbose.  May be repeated for more messages\n");
	exit (0);
}

int 
main (int argc, char * const * argv)
{
	int ch;
	int help = 0;
	int mcount = MAX_THREADS;
	struct State state;
	const char *filename = "lab07Data.txt";

	state.quanta = 50;
	state.overhead = 0;
	state.verbose = 0;

	while ((ch = getopt(argc, argv, "q:o:n:v")) != -1) {
		switch (ch) {
		case 'q':  state.quanta = atol(optarg); break;
		case 'o':  state.overhead = atol(optarg); break;
		case 'n':  mcount = atol(optarg); break;
		case 'v':  state.verbose++; break;
		default:   help++;
		}
	}

	if (help) {
		usage(argv[0]);
	}

	if (optind != argc) {
		filename = argv[optind];
	}

	if (kernelInit(&state) == -1) {
		exit(1);
	}

	if (loadTestData(&state.test, filename, mcount) == -1) {
		exit(1);
	}

	kernelRun(&state);
	kernelFinish(&state);

	showStats(&state);

	return 0;
}
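
In case anyone wants to try it: there's no Makefile, but assuming the
file is saved as sim1.c, something like

$ gcc -Wall -o sim1 sim1.c -lm

should do it (the -lm is for ceil()).  The data file is plain text
with one job per line: the time the job becomes ready, in seconds,
followed by its CPU time, in seconds; loadTestData() converts both
to milliseconds.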


