"Fossies" - the Fresh Open Source Software Archive

Member "install-tl-20200916/tlpkg/tlperl/lib/Tie/File.pm" (18 Apr 2017, 77444 Bytes) of package /windows/misc/install-tl.zip:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Perl source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file.

    1 
    2 package Tie::File;
    3 require 5.005;
    4 use Carp ':DEFAULT', 'confess';
    5 use POSIX 'SEEK_SET';
    6 use Fcntl 'O_CREAT', 'O_RDWR', 'LOCK_EX', 'LOCK_SH', 'O_WRONLY', 'O_RDONLY';
    7 sub O_ACCMODE () { O_RDONLY | O_RDWR | O_WRONLY }
    8 
    9 
   10 $VERSION = "1.02";
   11 my $DEFAULT_MEMORY_SIZE = 1<<21;    # 2 megabytes
   12 my $DEFAULT_AUTODEFER_THRESHHOLD = 3; # 3 records
   13 my $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD = 65536; # 16 disk blocksful
   14 
   15 my %good_opt = map {$_ => 1, "-$_" => 1}
   16                  qw(memory dw_size mode recsep discipline 
   17                     autodefer autochomp autodefer_threshhold concurrent);
   18 
   19 sub TIEARRAY {
   20   if (@_ % 2 != 0) {
   21     croak "usage: tie \@array, $_[0], filename, [option => value]...";
   22   }
   23   my ($pack, $file, %opts) = @_;
   24 
   25   # transform '-foo' keys into 'foo' keys
   26   for my $key (keys %opts) {
   27     unless ($good_opt{$key}) {
   28       croak("$pack: Unrecognized option '$key'\n");
   29     }
   30     my $okey = $key;
   31     if ($key =~ s/^-+//) {
   32       $opts{$key} = delete $opts{$okey};
   33     }
   34   }
   35 
   36   if ($opts{concurrent}) {
   37     croak("$pack: concurrent access not supported yet\n");
   38   }
   39 
   40   unless (defined $opts{memory}) {
   41     # default is the larger of the default cache size and the 
   42     # deferred-write buffer size (if specified)
   43     $opts{memory} = $DEFAULT_MEMORY_SIZE;
   44     $opts{memory} = $opts{dw_size}
   45       if defined $opts{dw_size} && $opts{dw_size} > $DEFAULT_MEMORY_SIZE;
   46     # Dora Winifred Read
   47   }
   48   $opts{dw_size} = $opts{memory} unless defined $opts{dw_size};
   49   if ($opts{dw_size} > $opts{memory}) {
   50       croak("$pack: dw_size may not be larger than total memory allocation\n");
   51   }
   52   # are we in deferred-write mode?
   53   $opts{defer} = 0 unless defined $opts{defer};
   54   $opts{deferred} = {};         # no records are presently deferred
   55   $opts{deferred_s} = 0;        # count of total bytes in ->{deferred}
   56   $opts{deferred_max} = -1;     # empty
   57 
   58   # What's a good way to arrange that this class can be overridden?
   59   $opts{cache} = Tie::File::Cache->new($opts{memory});
   60 
   61   # autodeferment is enabled by default
   62   $opts{autodefer} = 1 unless defined $opts{autodefer};
   63   $opts{autodeferring} = 0;     # but is not initially active
   64   $opts{ad_history} = [];
   65   $opts{autodefer_threshhold} = $DEFAULT_AUTODEFER_THRESHHOLD
   66     unless defined $opts{autodefer_threshhold};
   67   $opts{autodefer_filelen_threshhold} = $DEFAULT_AUTODEFER_FILELEN_THRESHHOLD
   68     unless defined $opts{autodefer_filelen_threshhold};
   69 
   70   $opts{offsets} = [0];
   71   $opts{filename} = $file;
   72   unless (defined $opts{recsep}) { 
   73     $opts{recsep} = _default_recsep();
   74   }
   75   $opts{recseplen} = length($opts{recsep});
   76   if ($opts{recseplen} == 0) {
   77     croak "Empty record separator not supported by $pack";
   78   }
   79 
   80   $opts{autochomp} = 1 unless defined $opts{autochomp};
   81 
   82   $opts{mode} = O_CREAT|O_RDWR unless defined $opts{mode};
   83   $opts{rdonly} = (($opts{mode} & O_ACCMODE) == O_RDONLY);
   84   $opts{sawlastrec} = undef;
   85 
   86   my $fh;
   87 
   88   if (UNIVERSAL::isa($file, 'GLOB')) {
   89     # We use 1 here on the theory that some systems 
   90     # may not indicate failure if we use 0.
   91     # MSWin32 does not indicate failure with 0, but I don't know if
   92     # it will indicate failure with 1 or not.
   93     unless (seek $file, 1, SEEK_SET) {
   94       croak "$pack: your filehandle does not appear to be seekable";
   95     }
   96     seek $file, 0, SEEK_SET;    # put it back
   97     $fh = $file;                # setting binmode is the user's problem
   98   } elsif (ref $file) {
   99     croak "usage: tie \@array, $pack, filename, [option => value]...";
  100   } else {
  101     # $fh = \do { local *FH };  # XXX this is buggy
  102     if ($] < 5.006) {
  103       # perl 5.005 and earlier don't autovivify filehandles
  104       require Symbol;
  105       $fh = Symbol::gensym();
  106     }
  107     sysopen $fh, $file, $opts{mode}, 0666 or return;
  108     binmode $fh;
  109     ++$opts{ourfh};
  110   }
  111   { my $ofh = select $fh; $| = 1; select $ofh } # autoflush on write
  112   if (defined $opts{discipline} && $] >= 5.006) {
  113     # This avoids a compile-time warning under 5.005
  114     eval 'binmode($fh, $opts{discipline})';
  115     croak $@ if $@ =~ /unknown discipline/i;
  116     die if $@;
  117   }
  118   $opts{fh} = $fh;
  119 
  120   bless \%opts => $pack;
  121 }
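
      # Illustrative use of the constructor above (a sketch, not executed here;
      # the filename and the memory figure are hypothetical):
      #
      #   use Tie::File;
      #   tie my @lines, 'Tie::File', '/tmp/example.log',
      #       memory    => 20_000_000,    # read-cache budget, in bytes
      #       recsep    => "\n",          # record separator
      #       autochomp => 1              # strip recsep from fetched records
      #     or die "Cannot tie file: $!";
      #   $lines[3] = "replacement for the fourth record";
      #   untie @lines;                   # flushes deferred writes, closes the file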
  122 
  123 sub FETCH {
  124   my ($self, $n) = @_;
  125   my $rec;
  126 
  127   # check the defer buffer
  128   $rec = $self->{deferred}{$n} if exists $self->{deferred}{$n};
  129   $rec = $self->_fetch($n) unless defined $rec;
  130 
  131   # inlined _chomp1
  132   substr($rec, - $self->{recseplen}) = ""
  133     if defined $rec && $self->{autochomp};
  134   $rec;
  135 }
  136 
  137 # Chomp many records in-place; return nothing useful
  138 sub _chomp {
  139   my $self = shift;
  140   return unless $self->{autochomp};
  141   if ($self->{autochomp}) {
  142     for (@_) {
  143       next unless defined;
  144       substr($_, - $self->{recseplen}) = "";
  145     }
  146   }
  147 }
  148 
  149 # Chomp one record in-place; return modified record
  150 sub _chomp1 {
  151   my ($self, $rec) = @_;
  152   return $rec unless $self->{autochomp};
  153   return unless defined $rec;
  154   substr($rec, - $self->{recseplen}) = "";
  155   $rec;
  156 }
  157 
  158 sub _fetch {
  159   my ($self, $n) = @_;
  160 
  161   # check the record cache
  162   { my $cached = $self->{cache}->lookup($n);
  163     return $cached if defined $cached;
  164   }
  165 
  166   if ($#{$self->{offsets}} < $n) {
  167     return if $self->{eof};  # request for record beyond end of file
  168     my $o = $self->_fill_offsets_to($n);
  169     # If it's still undefined, there is no such record, so return 'undef'
  170     return unless defined $o;
  171   }
  172 
  173   my $fh = $self->{fh};
  174   $self->_seek($n);             # we can do this now that offsets is populated
  175   my $rec = $self->_read_record;
  176 
  177 # If we happen to have just read the first record, check to see if
  178 # the length of the record matches what 'tell' says.  If not, Tie::File
  179 # won't work, and should drop dead.
  180 #
  181 #  if ($n == 0 && defined($rec) && tell($self->{fh}) != length($rec)) {
  182 #    if (defined $self->{discipline}) {
  183 #      croak "I/O discipline $self->{discipline} not supported";
  184 #    } else {
  185 #      croak "File encoding not supported";
  186 #    }
  187 #  }
  188 
  189   $self->{cache}->insert($n, $rec) if defined $rec && not $self->{flushing};
  190   $rec;
  191 }
  192 
  193 sub STORE {
  194   my ($self, $n, $rec) = @_;
  195   die "STORE called from _check_integrity!" if $DIAGNOSTIC;
  196 
  197   $self->_fixrecs($rec);
  198 
  199   if ($self->{autodefer}) {
  200     $self->_annotate_ad_history($n);
  201   }
  202 
  203   return $self->_store_deferred($n, $rec) if $self->_is_deferring;
  204 
  205 
  206   # We need this to decide whether the new record will fit
  207   # It incidentally populates the offsets table 
  208   # Note we have to do this before we alter the cache
  209   # 20020324 Wait, but this DOES alter the cache.  TODO BUG?
  210   my $oldrec = $self->_fetch($n);
  211 
  212   if (not defined $oldrec) {
  213     # We're storing a record beyond the end of the file
  214     $self->_extend_file_to($n+1);
  215     $oldrec = $self->{recsep};
  216   }
  217 #  return if $oldrec eq $rec;    # don't bother
  218   my $len_diff = length($rec) - length($oldrec);
  219 
  220   # length($oldrec) here is not consistent with text mode  TODO XXX BUG
  221   $self->_mtwrite($rec, $self->{offsets}[$n], length($oldrec));
  222   $self->_oadjust([$n, 1, $rec]);
  223   $self->{cache}->update($n, $rec);
  224 }
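
      # Worked example of the arithmetic above (hypothetical offsets): if
      # record 2 currently occupies bytes [8,12) and the caller stores a
      # 7-byte replacement, $len_diff is +3, _mtwrite() overwrites the old
      # 4 bytes and shifts the rest of the file up by 3, _oadjust() bumps
      # every later entry in the offsets table by +3, and the read cache
      # gets the new copy of record 2.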
  225 
  226 sub _store_deferred {
  227   my ($self, $n, $rec) = @_;
  228   $self->{cache}->remove($n);
  229   my $old_deferred = $self->{deferred}{$n};
  230 
  231   if (defined $self->{deferred_max} && $n > $self->{deferred_max}) {
  232     $self->{deferred_max} = $n;
  233   }
  234   $self->{deferred}{$n} = $rec;
  235 
  236   my $len_diff = length($rec);
  237   $len_diff -= length($old_deferred) if defined $old_deferred;
  238   $self->{deferred_s} += $len_diff;
  239   $self->{cache}->adj_limit(-$len_diff);
  240   if ($self->{deferred_s} > $self->{dw_size}) {
  241     $self->_flush;
  242   } elsif ($self->_cache_too_full) {
  243     $self->_cache_flush;
  244   }
  245 }
  246 
  247 # Remove a single record from the deferred-write buffer without writing it
  248 # The record need not be present
  249 sub _delete_deferred {
  250   my ($self, $n) = @_;
  251   my $rec = delete $self->{deferred}{$n};
  252   return unless defined $rec;
  253 
  254   if (defined $self->{deferred_max} 
  255       && $n == $self->{deferred_max}) {
  256     undef $self->{deferred_max};
  257   }
  258 
  259   $self->{deferred_s} -= length $rec;
  260   $self->{cache}->adj_limit(length $rec);
  261 }
  262 
  263 sub FETCHSIZE {
  264   my $self = shift;
  265   my $n = $self->{eof} ? $#{$self->{offsets}} : $self->_fill_offsets;
  266 
  267   my $top_deferred = $self->_defer_max;
  268   $n = $top_deferred+1 if defined $top_deferred && $n < $top_deferred+1;
  269   $n;
  270 }
  271 
  272 sub STORESIZE {
  273   my ($self, $len) = @_;
  274 
  275   if ($self->{autodefer}) {
  276     $self->_annotate_ad_history('STORESIZE');
  277   }
  278 
  279   my $olen = $self->FETCHSIZE;
  280   return if $len == $olen;      # Woo-hoo!
  281 
  282   # file gets longer
  283   if ($len > $olen) {
  284     if ($self->_is_deferring) {
  285       for ($olen .. $len-1) {
  286         $self->_store_deferred($_, $self->{recsep});
  287       }
  288     } else {
  289       $self->_extend_file_to($len);
  290     }
  291     return;
  292   }
  293 
  294   # file gets shorter
  295   if ($self->_is_deferring) {
  296     # TODO maybe replace this with map-plus-assignment?
  297     for (grep $_ >= $len, keys %{$self->{deferred}}) {
  298       $self->_delete_deferred($_);
  299     }
  300     $self->{deferred_max} = $len-1;
  301   }
  302 
  303   $self->_seek($len);
  304   $self->_chop_file;
  305   $#{$self->{offsets}} = $len;
  306 #  $self->{offsets}[0] = 0;      # in case we just chopped this
  307 
  308   $self->{cache}->remove(grep $_ >= $len, $self->{cache}->ckeys);
  309 }
  310 
  311 ### OPTIMIZE ME
  312 ### It should not be necessary to do FETCHSIZE
  313 ### Just seek to the end of the file.
  314 sub PUSH {
  315   my $self = shift;
  316   $self->SPLICE($self->FETCHSIZE, scalar(@_), @_);
  317 
  318   # No need to return:
  319   #  $self->FETCHSIZE;  # because av.c takes care of this for me
  320 }
  321 
  322 sub POP {
  323   my $self = shift;
  324   my $size = $self->FETCHSIZE;
  325   return if $size == 0;
  326 #  print STDERR "# POPPITY POP POP POP\n";
  327   scalar $self->SPLICE($size-1, 1);
  328 }
  329 
  330 sub SHIFT {
  331   my $self = shift;
  332   scalar $self->SPLICE(0, 1);
  333 }
  334 
  335 sub UNSHIFT {
  336   my $self = shift;
  337   $self->SPLICE(0, 0, @_);
  338   # $self->FETCHSIZE; # av.c takes care of this for me
  339 }
  340 
  341 sub CLEAR {
  342   my $self = shift;
  343 
  344   if ($self->{autodefer}) {
  345     $self->_annotate_ad_history('CLEAR');
  346   }
  347 
  348   $self->_seekb(0);
  349   $self->_chop_file;
  350   $self->{cache}->set_limit($self->{memory});
  351   $self->{cache}->empty;
  352   @{$self->{offsets}} = (0);
  353   %{$self->{deferred}} = ();
  354   $self->{deferred_s} = 0;
  355   $self->{deferred_max} = -1;
  356 }
  357 
  358 sub EXTEND {
  359   my ($self, $n) = @_;
  360 
  361   # No need to pre-extend anything in this case
  362   return if $self->_is_deferring;
  363 
  364   $self->_fill_offsets_to($n);
  365   $self->_extend_file_to($n);
  366 }
  367 
  368 sub DELETE {
  369   my ($self, $n) = @_;
  370 
  371   if ($self->{autodefer}) {
  372     $self->_annotate_ad_history('DELETE');
  373   }
  374 
  375   my $lastrec = $self->FETCHSIZE-1;
  376   my $rec = $self->FETCH($n);
  377   $self->_delete_deferred($n) if $self->_is_deferring;
  378   if ($n == $lastrec) {
  379     $self->_seek($n);
  380     $self->_chop_file;
  381     $#{$self->{offsets}}--;
  382     $self->{cache}->remove($n);
  383     # perhaps in this case I should also remove trailing null records?
  384     # 20020316
  385     # Note that delete @a[-3..-1] deletes the records in the wrong order,
  386     # so we only chop the very last one out of the file.  We could repair this
  387     # by tracking deleted records inside the object.
  388   } elsif ($n < $lastrec) {
  389     $self->STORE($n, "");
  390   }
  391   $rec;
  392 }
  393 
  394 sub EXISTS {
  395   my ($self, $n) = @_;
  396   return 1 if exists $self->{deferred}{$n};
  397   $n < $self->FETCHSIZE;
  398 }
  399 
  400 sub SPLICE {
  401   my $self = shift;
  402 
  403   if ($self->{autodefer}) {
  404     $self->_annotate_ad_history('SPLICE');
  405   }
  406 
  407   $self->_flush if $self->_is_deferring; # move this up?
  408   if (wantarray) {
  409     $self->_chomp(my @a = $self->_splice(@_));
  410     @a;
  411   } else {
  412     $self->_chomp1(scalar $self->_splice(@_));
  413   }
  414 }
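
      # Illustrative use through the tie interface (a sketch; @lines is a
      # hypothetical tied array): this method is reached by Perl's splice,
      # e.g.
      #   my @removed = splice(@lines, 2, 3, "new A", "new B");
      # which removes records 2..4 from the file, writes two records in
      # their place, and returns the removed records (autochomped if
      # autochomp is on).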
  415 
  416 sub DESTROY {
  417   my $self = shift;
  418   $self->flush if $self->_is_deferring;
  419   $self->{cache}->delink if defined $self->{cache}; # break circular link
  420   if ($self->{fh} and $self->{ourfh}) {
  421       delete $self->{ourfh};
  422       close delete $self->{fh};
  423   }
  424 }
  425 
  426 sub _splice {
  427   my ($self, $pos, $nrecs, @data) = @_;
  428   my @result;
  429 
  430   $pos = 0 unless defined $pos;
  431 
  432   # Deal with negative and other out-of-range positions
  433   # Also set default for $nrecs 
  434   {
  435     my $oldsize = $self->FETCHSIZE;
  436     $nrecs = $oldsize unless defined $nrecs;
  437     my $oldpos = $pos;
  438 
  439     if ($pos < 0) {
  440       $pos += $oldsize;
  441       if ($pos < 0) {
  442         croak "Modification of non-creatable array value attempted, " .
  443               "subscript $oldpos";
  444       }
  445     }
  446 
  447     if ($pos > $oldsize) {
  448       return unless @data;
  449       $pos = $oldsize;          # This is what perl does for normal arrays
  450     }
  451 
  452     # The manual is very unclear here
  453     if ($nrecs < 0) {
  454       $nrecs = $oldsize - $pos + $nrecs;
  455       $nrecs = 0 if $nrecs < 0;
  456     }
  457 
  458     # nrecs is too big---it really means "until the end"
  459     # 20030507
  460     if ($nrecs + $pos > $oldsize) {
  461       $nrecs = $oldsize - $pos;
  462     }
  463   }
  464 
  465   $self->_fixrecs(@data);
  466   my $data = join '', @data;
  467   my $datalen = length $data;
  468   my $oldlen = 0;
  469 
  470   # compute length of data being removed
  471   for ($pos .. $pos+$nrecs-1) {
  472     last unless defined $self->_fill_offsets_to($_);
  473     my $rec = $self->_fetch($_);
  474     last unless defined $rec;
  475     push @result, $rec;
  476 
  477     # Why don't we just use length($rec) here?
  478     # Because that record might have come from the cache.  _splice
  479     # might have been called to flush out the deferred-write records,
  480     # and in this case length($rec) is the length of the record to be
  481     # *written*, not the length of the actual record in the file.  But
  482     # the offsets are still true. 20020322
  483     $oldlen += $self->{offsets}[$_+1] - $self->{offsets}[$_]
  484       if defined $self->{offsets}[$_+1];
  485   }
  486   $self->_fill_offsets_to($pos+$nrecs);
  487 
  488   # Modify the file
  489   $self->_mtwrite($data, $self->{offsets}[$pos], $oldlen);
  490   # Adjust the offsets table
  491   $self->_oadjust([$pos, $nrecs, @data]);
  492 
  493   { # Take this read cache stuff out into a separate function
  494     # You made a half-attempt to put it into _oadjust.  
  495     # Finish something like that up eventually.
  496     # STORE also needs to do something similarish
  497 
  498     # update the read cache, part 1
  499     # modified records
  500     for ($pos .. $pos+$nrecs-1) {
  501       my $new = $data[$_-$pos];
  502       if (defined $new) {
  503         $self->{cache}->update($_, $new);
  504       } else {
  505         $self->{cache}->remove($_);
  506       }
  507     }
  508     
  509     # update the read cache, part 2
  510     # moved records - records past the site of the change
  511     # need to be renumbered
  512     # Maybe merge this with the previous block?
  513     {
  514       my @oldkeys = grep $_ >= $pos + $nrecs, $self->{cache}->ckeys;
  515       my @newkeys = map $_-$nrecs+@data, @oldkeys;
  516       $self->{cache}->rekey(\@oldkeys, \@newkeys);
  517     }
  518 
  519     # Now there might be too much data in the cache, if we spliced out
  520     # some short records and spliced in some long ones.  If so, flush
  521     # the cache.
  522     $self->_cache_flush;
  523   }
  524 
  525   # Yes, the return value of 'splice' *is* actually this complicated
  526   wantarray ? @result : @result ? $result[-1] : undef;
  527 }
  528 
  529 
  530 # write data into the file
  531 # $data is the data to be written.
  532 # it should be written at position $pos, and should overwrite
  533 # exactly $len of the following bytes.  
  534 # Note that if length($data) > $len, the subsequent bytes will have to 
  535 # be moved up, and if length($data) < $len, they will have to
  536 # be moved down
  537 sub _twrite {
  538   my ($self, $data, $pos, $len) = @_;
  539 
  540   unless (defined $pos) {
  541     die "\$pos was undefined in _twrite";
  542   }
  543 
  544   my $len_diff = length($data) - $len;
  545 
  546   if ($len_diff == 0) {          # Woo-hoo!
  547     my $fh = $self->{fh};
  548     $self->_seekb($pos);
  549     $self->_write_record($data);
  550     return;                     # well, that was easy.
  551   }
  552 
  553   # the two records are of different lengths
  554   # our strategy here: rewrite the tail of the file,
  555   # reading ahead one buffer at a time
  556   # $bufsize is required to be at least as large as the data we're overwriting
  557   my $bufsize = _bufsize($len_diff);
  558   my ($writepos, $readpos) = ($pos, $pos+$len);
  559   my $next_block;
  560   my $more_data;
  561 
  562   # Seems like there ought to be a way to avoid the repeated code
  563   # and the special case here.  The read(1) is also a little weird.
  564   # Think about this.
  565   do {
  566     $self->_seekb($readpos);
  567     my $br = read $self->{fh}, $next_block, $bufsize;
  568     $more_data = read $self->{fh}, my($dummy), 1;
  569     $self->_seekb($writepos);
  570     $self->_write_record($data);
  571     $readpos += $br;
  572     $writepos += length $data;
  573     $data = $next_block;
  574   } while $more_data;
  575   $self->_seekb($writepos);
  576   $self->_write_record($next_block);
  577 
  578   # There might be leftover data at the end of the file
  579   $self->_chop_file if $len_diff < 0;
  580 }
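
      # Worked example (hypothetical file contents): if the file holds
      # "aaa\nbb\ncccc\n" then _twrite("XXXXX\n", 4, 3) replaces the 3
      # bytes of "bb\n" at offset 4 with 6 new bytes; $len_diff is +3, so
      # everything from offset 7 onward is rewritten 3 bytes later, giving
      # "aaa\nXXXXX\ncccc\n".  With a negative $len_diff the tail moves
      # down instead and _chop_file() trims the leftover bytes at the end.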
  581 
  582 # _iwrite(D, S, E)
  583 # Insert text D at position S.
  584 # Let C = E-S-|D|.  If C < 0; die.  
  585 # Data in [S,S+C) is copied to [S+D,S+D+C) = [S+D,E).
  586 # Data in [S+C = E-D, E) is returned.  Data in [E, oo) is untouched.
  587 #
  588 # In a later version, don't read the entire intervening area into
  589 # memory at once; do the copying block by block.
  590 sub _iwrite {
  591   my $self = shift;
  592   my ($D, $s, $e) = @_;
  593   my $d = length $D;
  594   my $c = $e-$s-$d;
  595   local *FH = $self->{fh};
  596   confess "Not enough space to insert $d bytes between $s and $e"
  597     if $c < 0;
  598   confess "[$s,$e) is an invalid insertion range" if $e < $s;
  599 
  600   $self->_seekb($s);
  601   read FH, my $buf, $e-$s;
  602 
  603   $D .= substr($buf, 0, $c, "");
  604 
  605   $self->_seekb($s);
  606   $self->_write_record($D);
  607 
  608   return $buf;
  609 }
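
      # Worked example of the interval arithmetic above (hypothetical
      # values): with $s = 10, $e = 20 and a 4-byte $D, $c = 20-10-4 = 6,
      # so the 6 bytes at [10,16) are shifted to [14,20), $D lands at
      # [10,14), and the 4 bytes that previously occupied [16,20) are
      # returned to the caller.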
  610 
  611 # Like _twrite, but the data-pos-len triple may be repeated; you may
  612 # write several chunks.  All the writing will be done in
  613 # one pass.   Chunks SHALL be in ascending order and SHALL NOT overlap.
  614 sub _mtwrite {
  615   my $self = shift;
  616   my $unwritten = "";
  617   my $delta = 0;
  618 
  619   @_ % 3 == 0 
  620     or die "Arguments to _mtwrite did not come in groups of three";
  621 
  622   while (@_) {
  623     my ($data, $pos, $len) = splice @_, 0, 3;
  624     my $end = $pos + $len;  # The OLD end of the segment to be replaced
  625     $data = $unwritten . $data;
  626     $delta -= length($unwritten);
  627     $unwritten  = "";
  628     $pos += $delta;             # This is where the data goes now
  629     my $dlen = length $data;
  630     $self->_seekb($pos);
  631     if ($len >= $dlen) {        # the data will fit
  632       $self->_write_record($data);
  633       $delta += ($dlen - $len); # everything following moves down by this much
  634       $data = ""; # All the data in the buffer has been written
  635     } else {                    # won't fit
  636       my $writable = substr($data, 0, $len - $delta, "");
  637       $self->_write_record($writable);
  638       $delta += ($dlen - $len); # everything following moves down by this much
  639     } 
  640 
  641     # At this point we've written some but maybe not all of the data.
  642     # There might be a gap to close up, or $data might still contain a
  643     # bunch of unwritten data that didn't fit.
  644     my $ndlen = length $data;
  645     if ($delta == 0) {
  646       $self->_write_record($data);
  647     } elsif ($delta < 0) {
  648       # upcopy (close up gap)
  649       if (@_) {
  650         $self->_upcopy($end, $end + $delta, $_[1] - $end);  
  651       } else {
  652         $self->_upcopy($end, $end + $delta);  
  653       }
  654     } else {
  655       # downcopy (insert data that didn't fit; replace this data in memory
  656       # with _later_ data that doesn't fit)
  657       if (@_) {
  658         $unwritten = $self->_downcopy($data, $end, $_[1] - $end);
  659       } else {
  660         # Make the file longer to accommodate the last segment that doesn't fit
  661         $unwritten = $self->_downcopy($data, $end);
  662       }
  663     }
  664   }
  665 }
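
      # Sketch of a call (internal use only; the offsets are hypothetical):
      #   $self->_mtwrite("new1\n",  0, 5,    # replace the 5 bytes at [0,5)
      #                   "new22\n", 12, 4);  # replace the 4 bytes at [12,16)
      # performs both replacements in a single pass over the file; the
      # (data, pos, len) triples must be given in ascending order of pos
      # and must not overlap.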
  666 
  667 # Copy block of data of length $len from position $spos to position $dpos
  668 # $dpos must be <= $spos
  669 #
  670 # If $len is undefined, go all the way to the end of the file
  671 # and then truncate it ($spos - $dpos bytes will be removed)
  672 sub _upcopy {
  673   my $blocksize = 8192;
  674   my ($self, $spos, $dpos, $len) = @_;
  675   if ($dpos > $spos) {
  676     die "source ($spos) was upstream of destination ($dpos) in _upcopy";
  677   } elsif ($dpos == $spos) {
  678     return;
  679   }
  680 
  681   while (! defined ($len) || $len > 0) {
  682     my $readsize = ! defined($len) ? $blocksize
  683                : $len > $blocksize ? $blocksize
  684                : $len;
  685       
  686     my $fh = $self->{fh};
  687     $self->_seekb($spos);
  688     my $bytes_read = read $fh, my($data), $readsize;
  689     $self->_seekb($dpos);
  690     if ($data eq "") { 
  691       $self->_chop_file;
  692       last;
  693     }
  694     $self->_write_record($data);
  695     $spos += $bytes_read;
  696     $dpos += $bytes_read;
  697     $len -= $bytes_read if defined $len;
  698   }
  699 }
  700 
  701 # Write $data into a block of length $len at position $pos,
  702 # moving everything in the block forwards to make room.
  703 # Instead of writing the last length($data) bytes from the block
  704 # (because there isn't room for them any longer) return them.
  705 #
  706 # Undefined $len means 'until the end of the file'
  707 sub _downcopy {
  708   my $blocksize = 8192;
  709   my ($self, $data, $pos, $len) = @_;
  710   my $fh = $self->{fh};
  711 
  712   while (! defined $len || $len > 0) {
  713     my $readsize = ! defined($len) ? $blocksize 
  714       : $len > $blocksize? $blocksize : $len;
  715     $self->_seekb($pos);
  716     read $fh, my($old), $readsize;
  717     my $last_read_was_short = length($old) < $readsize;
  718     $data .= $old;
  719     my $writable;
  720     if ($last_read_was_short) {
  721       # If last read was short, then $data now contains the entire rest
  722       # of the file, so there's no need to write only one block of it
  723       $writable = $data;
  724       $data = "";
  725     } else {
  726       $writable = substr($data, 0, $readsize, "");
  727     }
  728     last if $writable eq "";
  729     $self->_seekb($pos);
  730     $self->_write_record($writable);
  731     last if $last_read_was_short && $data eq "";
  732     $len -= $readsize if defined $len;
  733     $pos += $readsize;
  734   }
  735   return $data;
  736 }
  737 
  738 # Adjust the object data structures following an '_mtwrite'
  739 # Arguments are
  740 #  [$pos, $nrecs, @records]  items
  741 # indicating that $nrecs records were removed at record position $pos
  742 # and replaced with the records in @records (only their lengths matter here)
  743 # Arguments guarantee that $pos is strictly increasing.
  744 # No return value
  745 sub _oadjust {
  746   my $self = shift;
  747   my $delta = 0;
  748   my $delta_recs = 0;
  749   my $prev_end = -1;
  750   my %newkeys;
  751 
  752   for (@_) {
  753     my ($pos, $nrecs, @data) = @$_;
  754     $pos += $delta_recs;
  755 
  756     # Adjust the offsets of the records after the previous batch up
  757     # to the first new one of this batch
  758     for my $i ($prev_end+2 .. $pos - 1) {
  759       $self->{offsets}[$i] += $delta;
  760       $newkeys{$i} = $i + $delta_recs;
  761     }
  762 
  763     $prev_end = $pos + @data - 1; # last record moved on this pass 
  764 
  765     # Remove the offsets for the removed records;
  766     # replace with the offsets for the inserted records
  767     my @newoff = ($self->{offsets}[$pos] + $delta);
  768     for my $i (0 .. $#data) {
  769       my $newlen = length $data[$i];
  770       push @newoff, $newoff[$i] + $newlen;
  771       $delta += $newlen;
  772     }
  773 
  774     for my $i ($pos .. $pos+$nrecs-1) {
  775       last if $i+1 > $#{$self->{offsets}};
  776       my $oldlen = $self->{offsets}[$i+1] - $self->{offsets}[$i];
  777       $delta -= $oldlen;
  778     }
  779 
  780 #    # also this data has changed, so update it in the cache
  781 #    for (0 .. $#data) {
  782 #      $self->{cache}->update($pos + $_, $data[$_]);
  783 #    }
  784 #    if ($delta_recs) {
  785 #      my @oldkeys = grep $_ >= $pos + @data, $self->{cache}->ckeys;
  786 #      my @newkeys = map $_ + $delta_recs, @oldkeys;
  787 #      $self->{cache}->rekey(\@oldkeys, \@newkeys);
  788 #    }
  789 
  790     # replace old offsets with new
  791     splice @{$self->{offsets}}, $pos, $nrecs+1, @newoff;
  792     # What if we just spliced out the end of the offsets table?
  793     # shouldn't we clear $self->{eof}?   Test for this XXX BUG TODO
  794 
  795     $delta_recs += @data - $nrecs; # net change in total number of records
  796   }
  797 
  798   # The trailing records at the very end of the file
  799   if ($delta) {
  800     for my $i ($prev_end+2 .. $#{$self->{offsets}}) {
  801       $self->{offsets}[$i] += $delta;
  802     }
  803   }
  804 
  805   # If we scrubbed out all known offsets, regenerate the trivial table
  806   # that knows that the file does indeed start at 0.
  807   $self->{offsets}[0] = 0 unless @{$self->{offsets}};
  808   # If the file got longer, the offsets table is no longer complete
  809   # $self->{eof} = 0 if $delta_recs > 0;
  810 
  811   # Now there might be too much data in the cache, if we spliced out
  812   # some short records and spliced in some long ones.  If so, flush
  813   # the cache.
  814   $self->_cache_flush;
  815 }
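
      # Worked example (hypothetical table): with three 4-byte records the
      # offsets table is (0, 4, 8, 12).  After replacing record 1 with one
      # 6-byte record, _oadjust([1, 1, "xxxxx\n"]) splices in the new
      # offsets and applies $delta = +2 to everything later, leaving
      # (0, 4, 10, 14).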
  816 
  817 # If a record does not already end with the appropriate terminator
  818 # string, append one.
  819 sub _fixrecs {
  820   my $self = shift;
  821   for (@_) {
  822     $_ = "" unless defined $_;
  823     $_ .= $self->{recsep}
  824       unless substr($_, - $self->{recseplen}) eq $self->{recsep};
  825   }
  826 }
  827 
  828 
  829 ################################################################
  830 #
  831 # Basic read, write, and seek
  832 #
  833 
  834 # seek to the beginning of record #$n
  835 # Assumes that the offsets table is already correctly populated
  836 #
  837 # Note that $n=-1 has a special meaning here: It means the start of
  838 # the last known record; this may or may not be the very last record
  839 # in the file, depending on whether the offsets table is fully populated.
  840 #
  841 sub _seek {
  842   my ($self, $n) = @_;
  843   my $o = $self->{offsets}[$n];
  844   defined($o)
  845     or confess("logic error: undefined offset for record $n");
  846   seek $self->{fh}, $o, SEEK_SET
  847     or confess "Couldn't seek filehandle: $!";  # "Should never happen."
  848 }
  849 
  850 # seek to byte $b in the file
  851 sub _seekb {
  852   my ($self, $b) = @_;
  853   seek $self->{fh}, $b, SEEK_SET
  854     or die "Couldn't seek filehandle: $!";  # "Should never happen."
  855 }
  856 
  857 # populate the offsets table up to the beginning of record $n
  858 # return the offset of record $n
  859 sub _fill_offsets_to {
  860   my ($self, $n) = @_;
  861 
  862   return $self->{offsets}[$n] if $self->{eof};
  863 
  864   my $fh = $self->{fh};
  865   local *OFF = $self->{offsets};
  866   my $rec;
  867 
  868   until ($#OFF >= $n) {
  869     $self->_seek(-1);           # tricky -- see comment at _seek
  870     $rec = $self->_read_record;
  871     if (defined $rec) {
  872       push @OFF, int(tell $fh);  # Tels says that int() saves memory here
  873     } else {
  874       $self->{eof} = 1;
  875       return;                   # It turns out there is no such record
  876     }
  877   }
  878 
  879   # we have now read all the records up to record n-1,
  880   # so we can return the offset of record n
  881   $OFF[$n];
  882 }
  883 
  884 sub _fill_offsets {
  885   my ($self) = @_;
  886 
  887   my $fh = $self->{fh};
  888   local *OFF = $self->{offsets};
  889 
  890   $self->_seek(-1);           # tricky -- see comment at _seek
  891 
  892   # Tels says that inlining read_record() would make this loop
  893   # five times faster. 20030508
  894   while ( defined $self->_read_record()) {
  895     # int() saves us memory here
  896     push @OFF, int(tell $fh);
  897   }
  898 
  899   $self->{eof} = 1;
  900   $#OFF;
  901 }
  902 
  903 # assumes that $rec is already suitably terminated
  904 sub _write_record {
  905   my ($self, $rec) = @_;
  906   my $fh = $self->{fh};
  907   local $\ = "";
  908   print $fh $rec
  909     or die "Couldn't write record: $!";  # "Should never happen."
  910 #  $self->{_written} += length($rec);
  911 }
  912 
  913 sub _read_record {
  914   my $self = shift;
  915   my $rec;
  916   { local $/ = $self->{recsep};
  917     my $fh = $self->{fh};
  918     $rec = <$fh>;
  919   }
  920   return unless defined $rec;
  921   if (substr($rec, -$self->{recseplen}) ne $self->{recsep}) {
  922     # improperly terminated final record --- quietly fix it.
  923 #    my $ac = substr($rec, -$self->{recseplen});
  924 #    $ac =~ s/\n/\\n/g;
  925     $self->{sawlastrec} = 1;
  926     unless ($self->{rdonly}) {
  927       local $\ = "";
  928       my $fh = $self->{fh};
  929       print $fh $self->{recsep};
  930     }
  931     $rec .= $self->{recsep};
  932   }
  933 #  $self->{_read} += length($rec) if defined $rec;
  934   $rec;
  935 }
  936 
  937 sub _rw_stats {
  938   my $self = shift;
  939   @{$self}{'_read', '_written'};
  940 }
  941 
  942 ################################################################
  943 #
  944 # Read cache management
  945 
  946 sub _cache_flush {
  947   my ($self) = @_;
  948   $self->{cache}->reduce_size_to($self->{memory} - $self->{deferred_s});
  949 }
  950 
  951 sub _cache_too_full {
  952   my $self = shift;
  953   $self->{cache}->bytes + $self->{deferred_s} >= $self->{memory};
  954 }
  955 
  956 ################################################################
  957 #
  958 # File custodial services
  959 #
  960 
  961 
  962 # We have read to the end of the file and have the offsets table
  963 # entirely populated.  Now we need to write a new record beyond
  964 # the end of the file.  We prepare for this by writing
  965 # empty records into the file up to the position we want
  966 #
  967 # assumes that the offsets table already contains the offset of record $n,
  968 # if it exists, and extends to the end of the file if not.
  969 sub _extend_file_to {
  970   my ($self, $n) = @_;
  971   $self->_seek(-1);             # position after the end of the last record
  972   my $pos = $self->{offsets}[-1];
  973 
  974   # the offsets table has one entry more than the total number of records
  975   my $extras = $n - $#{$self->{offsets}};
  976 
  977   # Todo : just use $self->{recsep} x $extras here?
  978   while ($extras-- > 0) {
  979     $self->_write_record($self->{recsep});
  980     push @{$self->{offsets}}, int(tell $self->{fh});
  981   }
  982 }
  983 
  984 # Truncate the file at the current position
  985 sub _chop_file {
  986   my $self = shift;
  987   truncate $self->{fh}, tell($self->{fh});
  988 }
  989 
  990 
  991 # compute the size of a buffer suitable for moving
  992 # all the data in a file forward $n bytes
  993 # ($n may be negative)
  994 # The result should be at least $n.
  995 sub _bufsize {
  996   my $n = shift;
  997   return 8192 if $n <= 0;
  998   my $b = $n & ~8191;
  999   $b += 8192 if $n & 8191;
 1000   $b;
 1001 }
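
      # Worked example: _bufsize(10_000) computes 10_000 & ~8191 == 8192,
      # and since 10_000 & 8191 is nonzero it adds one more block,
      # returning 16384 -- the smallest multiple of 8192 that is at least
      # 10_000.  Nonpositive arguments get a single 8192-byte buffer.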
 1002 
 1003 ################################################################
 1004 #
 1005 # Miscellaneous public methods
 1006 #
 1007 
 1008 # Lock the file
 1009 sub flock {
 1010   my ($self, $op) = @_;
 1011   unless (@_ <= 3) {
 1012     my $pack = ref $self;
 1013     croak "Usage: $pack\->flock([OPERATION])";
 1014   }
 1015   my $fh = $self->{fh};
 1016   $op = LOCK_EX unless defined $op;
 1017   my $locked = flock $fh, $op;
 1018 
 1019   if ($locked && ($op & (LOCK_EX | LOCK_SH))) {
 1020     # If you're locking the file, then presumably it's because
 1021     # there might have been a write access by another process.
 1022     # In that case, the read cache contents and the offsets table
 1023     # might be invalid, so discard them.  20030508
 1024     $self->{offsets} = [0];
 1025     $self->{cache}->empty;
 1026   }
 1027 
 1028   $locked;
 1029 }
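
      # Illustrative locking sketch (hypothetical variables): the object
      # returned by tie() exposes this method, so guarded access might
      # look like
      #
      #   use Fcntl 'LOCK_EX', 'LOCK_UN';
      #   my $obj = tie my @lines, 'Tie::File', $filename or die $!;
      #   $obj->flock(LOCK_EX);    # also discards the read cache and offsets
      #   push @lines, "appended while holding the lock";
      #   $obj->flock(LOCK_UN);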
 1030 
 1031 # Get/set autochomp option
 1032 sub autochomp {
 1033   my $self = shift;
 1034   if (@_) {
 1035     my $old = $self->{autochomp};
 1036     $self->{autochomp} = shift;
 1037     $old;
 1038   } else {
 1039     $self->{autochomp};
 1040   }
 1041 }
 1042 
 1043 # Get an offset table entry; returns the byte offset at which record $n begins
 1044 sub offset {
 1045   my ($self, $n) = @_;
 1046 
 1047   if ($#{$self->{offsets}} < $n) {
 1048     return if $self->{eof};     # request for record beyond the end of file
 1049     my $o = $self->_fill_offsets_to($n);
 1050     # If it's still undefined, there is no such record, so return 'undef'
 1051     return unless defined $o;
 1052    }
 1053 
 1054   $self->{offsets}[$n];
 1055 }
 1056 
 1057 sub discard_offsets {
 1058   my $self = shift;
 1059   $self->{offsets} = [0];
 1060 }
 1061 
 1062 ################################################################
 1063 #
 1064 # Matters related to deferred writing
 1065 #
 1066 
 1067 # Defer writes
 1068 sub defer {
 1069   my $self = shift;
 1070   $self->_stop_autodeferring;
 1071   @{$self->{ad_history}} = ();
 1072   $self->{defer} = 1;
 1073 }
 1074 
 1075 # Flush deferred writes
 1076 #
 1077 # This could be better optimized to write the file in one pass, instead
 1078 # of one pass per block of records.  But that will require modifications
 1079 # to _twrite, so I should have a good _twrite test suite first.
 1080 sub flush {
 1081   my $self = shift;
 1082 
 1083   $self->_flush;
 1084   $self->{defer} = 0;
 1085 }
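
      # Illustrative deferred-write sketch (hypothetical tied array): many
      # consecutive stores can be batched through these methods, e.g.
      #
      #   my $obj = tied @lines;
      #   $obj->defer;                       # buffer STOREs in memory
      #   $lines[$_] = "record $_" for 0 .. 999;
      #   $obj->flush;                       # write them out, end deferring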
 1086 
 1087 sub _old_flush {
 1088   my $self = shift;
 1089   my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
 1090 
 1091   while (@writable) {
 1092     # gather all consecutive records from the front of @writable
 1093     my $first_rec = shift @writable;
 1094     my $last_rec = $first_rec+1;
 1095     ++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
 1096     --$last_rec;
 1097     $self->_fill_offsets_to($last_rec);
 1098     $self->_extend_file_to($last_rec);
 1099     $self->_splice($first_rec, $last_rec-$first_rec+1, 
 1100                    @{$self->{deferred}}{$first_rec .. $last_rec});
 1101   }
 1102 
 1103   $self->_discard;               # clear out deferred-write buffer
 1104 }
 1105 
 1106 sub _flush {
 1107   my $self = shift;
 1108   my @writable = sort {$a<=>$b} (keys %{$self->{deferred}});
 1109   my @args;
 1110   my @adjust;
 1111 
 1112   while (@writable) {
 1113     # gather all consecutive records from the front of @writable
 1114     my $first_rec = shift @writable;
 1115     my $last_rec = $first_rec+1;
 1116     ++$last_rec, shift @writable while @writable && $last_rec == $writable[0];
 1117     --$last_rec;
 1118     my $end = $self->_fill_offsets_to($last_rec+1);
 1119     if (not defined $end) {
 1120       $self->_extend_file_to($last_rec);
 1121       $end = $self->{offsets}[$last_rec];
 1122     }
 1123     my ($start) = $self->{offsets}[$first_rec];
 1124     push @args,
 1125          join("", @{$self->{deferred}}{$first_rec .. $last_rec}), # data
 1126          $start,                                                  # position
 1127          $end-$start;                                             # length
 1128     push @adjust, [$first_rec, # starting at this position...
 1129                    $last_rec-$first_rec+1,  # this many records...
 1130                    # are replaced with these...
 1131                    @{$self->{deferred}}{$first_rec .. $last_rec},
 1132                   ];
 1133   }
 1134 
 1135   $self->_mtwrite(@args);  # write multiple record groups
 1136   $self->_discard;               # clear out deferred-write buffer
 1137   $self->_oadjust(@adjust);
 1138 }
 1139 
 1140 # Discard deferred writes and disable future deferred writes
 1141 sub discard {
 1142   my $self = shift;
 1143   $self->_discard;
 1144   $self->{defer} = 0;
 1145 }
 1146 
 1147 # Discard deferred writes, but retain old deferred writing mode
 1148 sub _discard {
 1149   my $self = shift;
 1150   %{$self->{deferred}} = ();
 1151   $self->{deferred_s}  = 0;
 1152   $self->{deferred_max}  = -1;
 1153   $self->{cache}->set_limit($self->{memory});
 1154 }
 1155 
 1156 # Deferred writing is enabled, either explicitly ($self->{defer})
 1157 # or automatically ($self->{autodeferring})
 1158 sub _is_deferring {
 1159   my $self = shift;
 1160   $self->{defer} || $self->{autodeferring};
 1161 }
 1162 
 1163 # The largest record number of any deferred record
 1164 sub _defer_max {
 1165   my $self = shift;
 1166   return $self->{deferred_max} if defined $self->{deferred_max};
 1167   my $max = -1;
 1168   for my $key (keys %{$self->{deferred}}) {
 1169     $max = $key if $key > $max;
 1170   }
 1171   $self->{deferred_max} = $max;
 1172   $max;
 1173 }
 1174 
 1175 ################################################################
 1176 #
 1177 # Matters related to autodeferment
 1178 #
 1179 
 1180 # Get/set autodefer option
 1181 sub autodefer {
 1182   my $self = shift;
 1183   if (@_) {
 1184     my $old = $self->{autodefer};
 1185     $self->{autodefer} = shift;
 1186     if ($old) {
 1187       $self->_stop_autodeferring;
 1188       @{$self->{ad_history}} = ();
 1189     }
 1190     $old;
 1191   } else {
 1192     $self->{autodefer};
 1193   }
 1194 }
 1195 
 1196 # The user is trying to store record #$n.  Record that in the history,
 1197 # and then enable (or disable) autodeferment if that seems useful.
 1198 # Note that it's OK for $n to be a non-number, as long as the function
 1199 # is prepared to deal with that.  Nobody else looks at the ad_history.
 1200 #
 1201 # Now, what does the ad_history mean, and what is this function doing?
 1202 # Essentially, the idea is to enable autodeferring when we see that the
 1203 # user has made three consecutive STORE calls to three consecutive records.
 1204 # ("Three" is actually ->{autodefer_threshhold}.)
 1205 # A STORE call for record #$n inserts $n into the autodefer history,
 1206 # and if the history contains three consecutive records, we enable 
 1207 # autodeferment.  An ad_history of [X, Y] means that the most recent
 1208 # STOREs were for records X, X+1, ..., Y, in that order.  
 1209 #
 1210 # Inserting a nonconsecutive number erases the history and starts over.
 1211 #
 1212 # Performing a special operation like SPLICE erases the history.
 1213 #
 1214 # There's one special case: CLEAR means that CLEAR was just called.
 1215 # In this case, we prime the history with [-2, -1] so that if the next
 1216 # write is for record 0, autodeferring goes on immediately.  This is for
 1217 # the common special case of "@a = (...)".
 1218 #
 1219 sub _annotate_ad_history {
 1220   my ($self, $n) = @_;
 1221   return unless $self->{autodefer}; # feature is disabled
 1222   return if $self->{defer};     # already in explicit defer mode
 1223   return unless $self->{offsets}[-1] >= $self->{autodefer_filelen_threshhold};
 1224 
 1225   local *H = $self->{ad_history};
 1226   if ($n eq 'CLEAR') {
 1227     @H = (-2, -1);              # prime the history with fake records
 1228     $self->_stop_autodeferring;
 1229   } elsif ($n =~ /^\d+$/) {
 1230     if (@H == 0) {
 1231       @H =  ($n, $n);
 1232     } else {                    # @H == 2
 1233       if ($H[1] == $n-1) {      # another consecutive record
 1234         $H[1]++;
 1235         if ($H[1] - $H[0] + 1 >= $self->{autodefer_threshhold}) {
 1236           $self->{autodeferring} = 1;
 1237         }
 1238       } else {                  # nonconsecutive- erase and start over
 1239         @H = ($n, $n);
 1240         $self->_stop_autodeferring;
 1241       }
 1242     }
 1243   } else {                      # SPLICE or STORESIZE or some such
 1244     @H = ();
 1245     $self->_stop_autodeferring;
 1246   }
 1247 }
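
      # Worked example of the history above (hypothetical STORE sequence,
      # assuming the file is already past autodefer_filelen_threshhold):
      # storing records 5, 6, 7 in order takes @H from () to (5,5) to
      # (5,6) to (5,7); since 7-5+1 reaches the default threshhold of 3,
      # autodeferring switches on.  A later STORE to record 12 resets @H
      # to (12,12) and switches it off again.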
 1248 
 1249 # If autodeferring was enabled, cut it out and discard the history
 1250 sub _stop_autodeferring {
 1251   my $self = shift;
 1252   if ($self->{autodeferring}) {
 1253     $self->_flush;
 1254   }
 1255   $self->{autodeferring} = 0;
 1256 }
 1257 
 1258 ################################################################
 1259 
 1260 
 1261 # This is NOT a method.  It is here for two reasons:
 1262 #  1. To factor a fairly complicated block out of the constructor
 1263 #  2. To provide access for the test suite, which needs to be sure
 1264 #     files are being written properly.
 1265 sub _default_recsep {
 1266   my $recsep = $/;
 1267   if ($^O eq 'MSWin32') {       # Dos too?
 1268     # Windows users expect files to be terminated with \r\n
 1269     # But $/ is set to \n instead
 1270     # Note that this also transforms \n\n into \r\n\r\n.
 1271     # That is a feature.
 1272     $recsep =~ s/\n/\r\n/g;
 1273   }
 1274   $recsep;
 1275 }
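
      # Worked example: $/ defaults to "\n" everywhere, so on MSWin32 the
      # record separator becomes "\r\n" and records written as ("foo",
      # "bar") end up on disk (with binmode in effect) as the ten bytes
      # "foo\r\nbar\r\n"; on other systems the same records occupy the
      # eight bytes "foo\nbar\n".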
 1276 
 1277 # Utility function for _check_integrity
 1278 sub _ci_warn {
 1279   my $msg = shift;
 1280   $msg =~ s/\n/\\n/g;
 1281   $msg =~ s/\r/\\r/g;
 1282   print "# $msg\n";
 1283 }
 1284 
 1285 # Given a file, make sure the cache is consistent with the
 1286 # file contents and the internal data structures are consistent with
 1287 # each other.  Returns true if everything checks out, false if not
 1288 #
 1289 # The $file argument is no longer used.  It is retained for compatibility
 1290 # with the existing test suite.
 1291 sub _check_integrity {
 1292   my ($self, $file, $warn) = @_;
 1293   my $rsl = $self->{recseplen};
 1294   my $rs  = $self->{recsep};
 1295   my $good = 1; 
 1296   local *_;                     # local $_ does not work here
 1297   local $DIAGNOSTIC = 1;
 1298 
 1299   if (not defined $rs) {
 1300     _ci_warn("recsep is undef!");
 1301     $good = 0;
 1302   } elsif ($rs eq "") {
 1303     _ci_warn("recsep is empty!");
 1304     $good = 0;
 1305   } elsif ($rsl != length $rs) {
 1306     my $ln = length $rs;
 1307     _ci_warn("recsep <$rs> has length $ln, should be $rsl");
 1308     $good = 0;
 1309   }
 1310 
 1311   if (not defined $self->{offsets}[0]) {
 1312     _ci_warn("offset 0 is missing!");
 1313     $good = 0;
 1314 
 1315   } elsif ($self->{offsets}[0] != 0) {
 1316     _ci_warn("rec 0: offset <$self->{offsets}[0]> s/b 0!");
 1317     $good = 0;
 1318   }
 1319 
 1320   my $cached = 0;
 1321   {
 1322     local *F = $self->{fh};
 1323     seek F, 0, SEEK_SET;
 1324     local $. = 0;
 1325     local $/ = $rs;
 1326 
 1327     while (<F>) {
 1328       my $n = $. - 1;
 1329       my $cached = $self->{cache}->_produce($n);
 1330       my $offset = $self->{offsets}[$.];
 1331       my $ao = tell F;
 1332       if (defined $offset && $offset != $ao) {
 1333         _ci_warn("rec $n: offset <$offset> actual <$ao>");
 1334         $good = 0;
 1335       }
 1336       if (defined $cached && $_ ne $cached && ! $self->{deferred}{$n}) {
 1337         $good = 0;
 1338         _ci_warn("rec $n: cached <$cached> actual <$_>");
 1339       }
 1340       if (defined $cached && substr($cached, -$rsl) ne $rs) {
 1341         $good = 0;
 1342         _ci_warn("rec $n in the cache is missing the record separator");
 1343       }
 1344       if (! defined $offset && $self->{eof}) {
 1345         $good = 0;
 1346         _ci_warn("The offset table was marked complete, but it is missing " .
 1347                  "element $.");
 1348       }
 1349     }
 1350     if (@{$self->{offsets}} > $.+1) {
 1351         $good = 0;
 1352         my $n = @{$self->{offsets}};
 1353         _ci_warn("The offset table has $n items, but the file has only $.");
 1354     }
 1355 
 1356     my $deferring = $self->_is_deferring;
 1357     for my $n ($self->{cache}->ckeys) {
 1358       my $r = $self->{cache}->_produce($n);
 1359       $cached += length($r);
 1360       next if $n+1 <= $.;         # checked this already
 1361       _ci_warn("spurious caching of record $n");
 1362       $good = 0;
 1363     }
 1364     my $b = $self->{cache}->bytes;
 1365     if ($cached != $b) {
 1366       _ci_warn("cache size is $b, should be $cached");
 1367       $good = 0;
 1368     }
 1369   }
 1370 
 1371   # That cache has its own set of tests
 1372   $good = 0 unless $self->{cache}->_check_integrity;
 1373 
 1374   # Now let's check the deferbuffer
 1375   # Unless deferred writing is enabled, it should be empty
 1376   if (! $self->_is_deferring && %{$self->{deferred}}) {
 1377     _ci_warn("deferred writing disabled, but deferbuffer nonempty");
 1378     $good = 0;
 1379   }
 1380 
 1381   # Any record in the deferbuffer should *not* be present in the readcache
 1382   my $deferred_s = 0;
 1383   while (my ($n, $r) = each %{$self->{deferred}}) {
 1384     $deferred_s += length($r);
 1385     if (defined $self->{cache}->_produce($n)) {
 1386       _ci_warn("record $n is in the deferbuffer *and* the readcache");
 1387       $good = 0;
 1388     }
 1389     if (substr($r, -$rsl) ne $rs) {
 1390       _ci_warn("rec $n in the deferbuffer is missing the record separator");
 1391       $good = 0;
 1392     }
 1393   }
 1394 
 1395   # Total size of deferbuffer should match internal total
 1396   if ($deferred_s != $self->{deferred_s}) {
 1397     _ci_warn("buffer size is $self->{deferred_s}, should be $deferred_s");
 1398     $good = 0;
 1399   }
 1400 
 1401   # Total size of deferbuffer should not exceed the specified limit
 1402   if ($deferred_s > $self->{dw_size}) {
 1403     _ci_warn("buffer size is $self->{deferred_s} which exceeds the limit " .
 1404              "of $self->{dw_size}");
 1405     $good = 0;
 1406   }
 1407 
 1408   # Total size of cached data should not exceed the specified limit
 1409   if ($deferred_s + $cached > $self->{memory}) {
 1410     my $total = $deferred_s + $cached;
 1411     _ci_warn("total stored data size is $total which exceeds the limit " .
 1412              "of $self->{memory}");
 1413     $good = 0;
 1414   }
 1415 
 1416   # Stuff related to autodeferment
 1417   if (!$self->{autodefer} && @{$self->{ad_history}}) {
 1418     _ci_warn("autodefer is disabled, but ad_history is nonempty");
 1419     $good = 0;
 1420   }
 1421   if ($self->{autodeferring} && $self->{defer}) {
 1422     _ci_warn("both autodeferring and explicit deferring are active");
 1423     $good = 0;
 1424   }
 1425   if (@{$self->{ad_history}} == 0) {
 1426     # That's OK, no additional tests required
 1427   } elsif (@{$self->{ad_history}} == 2) {
 1428     my @non_number = grep !/^-?\d+$/, @{$self->{ad_history}};
 1429     if (@non_number) {
 1430       my $msg;
 1431       { local $" = ')(';
 1432         $msg = "ad_history contains non-numbers (@{$self->{ad_history}})";
 1433       }
 1434       _ci_warn($msg);
 1435       $good = 0;
 1436     } elsif ($self->{ad_history}[1] < $self->{ad_history}[0]) {
 1437       _ci_warn("ad_history has nonsensical values @{$self->{ad_history}}");
 1438       $good = 0;
 1439     }
 1440   } else {
 1441     _ci_warn("ad_history has bad length <@{$self->{ad_history}}>");
 1442     $good = 0;
 1443   }
 1444 
 1445   $good;
 1446 }
 1447 
 1448 ################################################################
 1449 #
 1450 # Tie::File::Cache
 1451 #
 1452 # Read cache
 1453 
 1454 package Tie::File::Cache;
 1455 $Tie::File::Cache::VERSION = $Tie::File::VERSION;
 1456 use Carp ':DEFAULT', 'confess';
 1457 
 1458 sub HEAP () { 0 }
 1459 sub HASH () { 1 }
 1460 sub MAX  () { 2 }
 1461 sub BYTES() { 3 }
 1462 #sub STAT () { 4 } # Array with request statistics for each record
 1463 #sub MISS () { 5 } # Total number of cache misses
 1464 #sub REQ  () { 6 } # Total number of cache requests 
 1465 use strict 'vars';
 1466 
 1467 sub new {
 1468   my ($pack, $max) = @_;
 1469   local *_;
 1470   croak "missing argument to ->new" unless defined $max;
 1471   my $self = [];
 1472   bless $self => $pack;
 1473   @$self = (Tie::File::Heap->new($self), {}, $max, 0);
 1474   $self;
 1475 }
 1476 
 1477 sub adj_limit {
 1478   my ($self, $n) = @_;
 1479   $self->[MAX] += $n;
 1480 }
 1481 
 1482 sub set_limit {
 1483   my ($self, $n) = @_;
 1484   $self->[MAX] = $n;
 1485 }
 1486 
 1487 # For internal use only
 1488 # Will be called by the heap structure to notify us that a certain 
 1489 # piece of data has moved from one heap element to another.
 1490 # $k is the hash key of the item
 1491 # $n is the new index into the heap at which it is stored
 1492 # If $n is undefined, the item has been removed from the heap.
 1493 sub _heap_move {
 1494   my ($self, $k, $n) = @_;
 1495   if (defined $n) {
 1496     $self->[HASH]{$k} = $n;
 1497   } else {
 1498     delete $self->[HASH]{$k};
 1499   }
 1500 }
 1501 
 1502 sub insert {
 1503   my ($self, $key, $val) = @_;
 1504   local *_;
 1505   croak "missing argument to ->insert" unless defined $key;
 1506   unless (defined $self->[MAX]) {
 1507     confess "undefined max" ;
 1508   }
 1509   confess "undefined val" unless defined $val;
 1510   return if length($val) > $self->[MAX];
 1511 
 1512 #  if ($self->[STAT]) {
 1513 #    $self->[STAT][$key] = 1;
 1514 #    return;
 1515 #  }
 1516 
 1517   my $oldnode = $self->[HASH]{$key};
 1518   if (defined $oldnode) {
 1519     my $oldval = $self->[HEAP]->set_val($oldnode, $val);
 1520     $self->[BYTES] -= length($oldval);
 1521   } else {
 1522     $self->[HEAP]->insert($key, $val);
 1523   }
 1524   $self->[BYTES] += length($val);
 1525   $self->flush if $self->[BYTES] > $self->[MAX];
 1526 }
 1527 
 1528 sub expire {
 1529   my $self = shift;
 1530   my $old_data = $self->[HEAP]->popheap;
 1531   return unless defined $old_data;
 1532   $self->[BYTES] -= length $old_data;
 1533   $old_data;
 1534 }
 1535 
 1536 sub remove {
 1537   my ($self, @keys) = @_;
 1538   my @result;
 1539 
 1540 #  if ($self->[STAT]) {
 1541 #    for my $key (@keys) {
 1542 #      $self->[STAT][$key] = 0;
 1543 #    }
 1544 #    return;
 1545 #  }
 1546 
 1547   for my $key (@keys) {
 1548     next unless exists $self->[HASH]{$key};
 1549     my $old_data = $self->[HEAP]->remove($self->[HASH]{$key});
 1550     $self->[BYTES] -= length $old_data;
 1551     push @result, $old_data;
 1552   }
 1553   @result;
 1554 }
 1555 
 1556 sub lookup {
 1557   my ($self, $key) = @_;
 1558   local *_;
 1559   croak "missing argument to ->lookup" unless defined $key;
 1560 
 1561 #  if ($self->[STAT]) {
 1562 #    $self->[MISS]++  if $self->[STAT][$key]++ == 0;
 1563 #    $self->[REQ]++;
 1564 #    my $hit_rate = 1 - $self->[MISS] / $self->[REQ];
 1565 #    # Do some testing to determine this threshhold
 1566 #    $#$self = STAT - 1 if $hit_rate > 0.20; 
 1567 #  }
 1568 
 1569   if (exists $self->[HASH]{$key}) {
 1570     $self->[HEAP]->lookup($self->[HASH]{$key});
 1571   } else {
 1572     return;
 1573   }
 1574 }
 1575 
 1576 # For internal use only
 1577 sub _produce {
 1578   my ($self, $key) = @_;
 1579   my $loc = $self->[HASH]{$key};
 1580   return unless defined $loc;
 1581   $self->[HEAP][$loc][2];
 1582 }
 1583 
 1584 # For internal use only
 1585 sub _promote {
 1586   my ($self, $key) = @_;
 1587   $self->[HEAP]->promote($self->[HASH]{$key});
 1588 }
 1589 
 1590 sub empty {
 1591   my ($self) = @_;
 1592   %{$self->[HASH]} = ();
 1593   $self->[BYTES] = 0;
 1594   $self->[HEAP]->empty;
 1595 #  @{$self->[STAT]} = ();
 1596 #    $self->[MISS] = 0;
 1597 #    $self->[REQ] = 0;
 1598 }
 1599 
 1600 sub is_empty {
 1601   my ($self) = @_;
 1602   keys %{$self->[HASH]} == 0;
 1603 }
 1604 
 1605 sub update {
 1606   my ($self, $key, $val) = @_;
 1607   local *_;
 1608   croak "missing argument to ->update" unless defined $key;
 1609   if (length($val) > $self->[MAX]) {
 1610     my ($oldval) = $self->remove($key);
 1611     $self->[BYTES] -= length($oldval) if defined $oldval;
 1612   } elsif (exists $self->[HASH]{$key}) {
 1613     my $oldval = $self->[HEAP]->set_val($self->[HASH]{$key}, $val);
 1614     $self->[BYTES] += length($val);
 1615     $self->[BYTES] -= length($oldval) if defined $oldval;
 1616   } else {
 1617     $self->[HEAP]->insert($key, $val);
 1618     $self->[BYTES] += length($val);
 1619   }
 1620   $self->flush;
 1621 }
 1622 
 1623 sub rekey {
 1624   my ($self, $okeys, $nkeys) = @_;
 1625   local *_;
 1626   my %map;
 1627   @map{@$okeys} = @$nkeys;
 1628   croak "missing argument to ->rekey" unless defined $nkeys;
 1629   croak "length mismatch in ->rekey arguments" unless @$nkeys == @$okeys;
 1630   my %adjusted;                 # map new keys to heap indices
 1631   # You should be able to cut this to one loop TODO XXX
 1632   for (0 .. $#$okeys) {
 1633     $adjusted{$nkeys->[$_]} = delete $self->[HASH]{$okeys->[$_]};
 1634   }
 1635   while (my ($nk, $ix) = each %adjusted) {
 1636     # @{$self->[HASH]}{keys %adjusted} = values %adjusted;
 1637     $self->[HEAP]->rekey($ix, $nk);
 1638     $self->[HASH]{$nk} = $ix;
 1639   }
 1640 }
 1641 
 1642 sub ckeys {
 1643   my $self = shift;
 1644   my @a = keys %{$self->[HASH]};
 1645   @a;
 1646 }
 1647 
 1648 # Return total amount of cached data
 1649 sub bytes {
 1650   my $self = shift;
 1651   $self->[BYTES];
 1652 }
 1653 
 1654 # Expire oldest item from cache until cache size is smaller than $max
 1655 sub reduce_size_to {
 1656   my ($self, $max) = @_;
 1657   until ($self->[BYTES] <= $max) {
 1658     # Note that Tie::File::Cache::expire has been inlined here
 1659     my $old_data = $self->[HEAP]->popheap;
 1660     return unless defined $old_data;
 1661     $self->[BYTES] -= length $old_data;
 1662   }
 1663 }
 1664 
 1665 # Why not just $self->reduce_size_to($self->[MAX])?
 1666 # Try this when things stabilize   TODO XXX
 1667 # If the cache is too full, expire the oldest records
 1668 sub flush {
 1669   my $self = shift;
 1670   $self->reduce_size_to($self->[MAX]) if $self->[BYTES] > $self->[MAX];
 1671 }
 1672 
 1673 # For internal use only
 1674 sub _produce_lru {
 1675   my $self = shift;
 1676   $self->[HEAP]->expire_order;
 1677 }
 1678 
 1679 BEGIN { *_ci_warn = \&Tie::File::_ci_warn }
 1680 
 1681 sub _check_integrity {          # For CACHE
 1682   my $self = shift;
 1683   my $good = 1;
 1684 
 1685   # Test HEAP
 1686   $self->[HEAP]->_check_integrity or $good = 0;
 1687 
 1688   # Test HASH
 1689   my $bytes = 0;
 1690   for my $k (keys %{$self->[HASH]}) {
 1691     if ($k ne '0' && $k !~ /^[1-9][0-9]*$/) {
 1692       $good = 0;
 1693       _ci_warn "Cache hash key <$k> is non-numeric";
 1694     }
 1695 
 1696     my $h = $self->[HASH]{$k};
 1697     if (! defined $h) {
 1698       $good = 0;
 1699       _ci_warn "Heap index number for key $k is undefined";
 1700     } elsif ($h == 0) {
 1701       $good = 0;
 1702       _ci_warn "Heap index number for key $k is zero";
 1703     } else {
 1704       my $j = $self->[HEAP][$h];
 1705       if (! defined $j) {
 1706         $good = 0;
 1707         _ci_warn "Heap contents key $k (=> $h) are undefined";
 1708       } else {
 1709         $bytes += length($j->[2]);
 1710         if ($k ne $j->[1]) {
 1711           $good = 0;
 1712           _ci_warn "Heap contents key $k (=> $h) is $j->[1], should be $k";
 1713         }
 1714       }
 1715     }
 1716   }
 1717 
 1718   # Test BYTES
 1719   if ($bytes != $self->[BYTES]) {
 1720     $good = 0;
 1721     _ci_warn "Total data in cache is $bytes, expected $self->[BYTES]";
 1722   }
 1723 
 1724   # Test MAX
 1725   if ($bytes > $self->[MAX]) {
 1726     $good = 0;
 1727     _ci_warn "Total data in cache is $bytes, exceeds maximum $self->[MAX]";
 1728   }
 1729 
 1730   return $good;
 1731 }
 1732 
 1733 sub delink {
 1734   my $self = shift;
 1735   $self->[HEAP] = undef;        # Bye bye heap
 1736 }
 1737 
 1738 ################################################################
 1739 #
 1740 # Tie::File::Heap
 1741 #
 1742 # Heap data structure for use by cache LRU routines
 1743 
 1744 package Tie::File::Heap;
 1745 use Carp ':DEFAULT', 'confess';
 1746 $Tie::File::Heap::VERSION = $Tie::File::Cache::VERSION;
 1747 sub SEQ () { 0 };
 1748 sub KEY () { 1 };
 1749 sub DAT () { 2 };
 1750 
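# The heap is an implicit binary tree kept in a Perl array: element 0
# holds bookkeeping data (the sequence-number counter, the parent cache
# object, and the element count), and elements 1..N hold [SEQ, KEY, DAT]
# triples, with the children of node $i at positions 2*$i and 2*$i+1.
# The node with the smallest SEQ (the least-recently-used item) is kept
# at node 1, the root.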
 1751 sub new {
 1752   my ($pack, $cache) = @_;
 1753   die "$pack: Parent cache object $cache does not support _heap_move method"
 1754     unless eval { $cache->can('_heap_move') };
 1755   my $self = [[0,$cache,0]];
 1756   bless $self => $pack;
 1757 }
 1758 
 1759 # Allocate a new sequence number, larger than all previously allocated numbers
 1760 sub _nseq {
 1761   my $self = shift;
 1762   $self->[0][0]++;
 1763 }
 1764 
 1765 sub _cache {
 1766   my $self = shift;
 1767   $self->[0][1];
 1768 }
 1769 
 1770 sub _nelts {
 1771   my $self = shift;
 1772   $self->[0][2];
 1773 }
 1774 
 1775 sub _nelts_inc {
 1776   my $self = shift;
 1777   ++$self->[0][2];
 1778 }  
 1779 
 1780 sub _nelts_dec {
 1781   my $self = shift;
 1782   --$self->[0][2];
 1783 }  
 1784 
 1785 sub is_empty {
 1786   my $self = shift;
 1787   $self->_nelts == 0;
 1788 }
 1789 
 1790 sub empty {
 1791   my $self = shift;
 1792   $#$self = 0;
 1793   $self->[0][2] = 0;
 1794   $self->[0][0] = 0;            # might as well reset the sequence numbers
 1795 }
 1796 
 1797 # notify the parent cache object that we moved something
 1798 sub _heap_move {
 1799   my $self = shift;
 1800   $self->_cache->_heap_move(@_);
 1801 }
 1802 
 1803 # Insert a piece of data into the heap with the indicated sequence number.
 1804 # The item with the smallest sequence number is always at the top.
 1805 # If no sequence number is specified, allocate a new one and insert the
 1806 # item at the bottom.
 1807 sub insert {
 1808   my ($self, $key, $data, $seq) = @_;
 1809   $seq = $self->_nseq unless defined $seq;
 1810   $self->_insert_new([$seq, $key, $data]);
 1811 }
 1812 
 1813 # Insert a new, fresh item at the bottom of the heap
 1814 sub _insert_new {
 1815   my ($self, $item) = @_;
 1816   my $i = @$self;
 1817   $i = int($i/2) until defined $self->[$i/2];
 1818   $self->[$i] = $item;
 1819   $self->[0][1]->_heap_move($self->[$i][KEY], $i);
 1820   $self->_nelts_inc;
 1821 }
 1822 
 1823 # Insert an item (a [SEQ, KEY, DAT] triple) at or below node $i in the heap.
 1824 # If $i is omitted, default to 1 (the top element).
 1825 sub _insert {
 1826   my ($self, $item, $i) = @_;
 1827 #  $self->_check_loc($i) if defined $i;
 1828   $i = 1 unless defined $i;
 1829   until (! defined $self->[$i]) {
 1830     if ($self->[$i][SEQ] > $item->[SEQ]) { # inserted item is older
 1831       ($self->[$i], $item) = ($item, $self->[$i]);
 1832       $self->[0][1]->_heap_move($self->[$i][KEY], $i);
 1833     }
 1834     # If either is undefined, go that way.  Otherwise, choose at random
 1835     my $dir;
 1836     $dir = 0 if !defined $self->[2*$i];
 1837     $dir = 1 if !defined $self->[2*$i+1];
 1838     $dir = int(rand(2)) unless defined $dir;
 1839     $i = 2*$i + $dir;
 1840   }
 1841   $self->[$i] = $item;
 1842   $self->[0][1]->_heap_move($self->[$i][KEY], $i);
 1843   $self->_nelts_inc;
 1844 }
 1845 
 1846 # Remove the item at node $i from the heap, moving child items upwards.
 1847 # The item with the smallest sequence number is always at the top.
 1848 # Moving items upwards maintains this condition.
 1849 # Return the removed item.  Return undef if there was no item at node $i.
 1850 sub remove {
 1851   my ($self, $i) = @_;
 1852   $i = 1 unless defined $i;
 1853   my $top = $self->[$i];
 1854   return unless defined $top;
 1855   while (1) {
 1856     my $ii;
 1857     my ($L, $R) = (2*$i, 2*$i+1);
 1858 
 1859     # If either is undefined, go the other way.
 1860     # Otherwise, go towards the smallest.
 1861     last unless defined $self->[$L] || defined $self->[$R];
 1862     $ii = $R if not defined $self->[$L];
 1863     $ii = $L if not defined $self->[$R];
 1864     unless (defined $ii) {
 1865       $ii = $self->[$L][SEQ] < $self->[$R][SEQ] ? $L : $R;
 1866     }
 1867 
 1868     $self->[$i] = $self->[$ii]; # Promote child to fill vacated spot
 1869     $self->[0][1]->_heap_move($self->[$i][KEY], $i);
 1870     $i = $ii; # Fill new vacated spot
 1871   }
 1872   $self->[0][1]->_heap_move($top->[KEY], undef);
 1873   undef $self->[$i];
 1874   $self->_nelts_dec;
 1875   return $top->[DAT];
 1876 }
 1877 
 1878 sub popheap {
 1879   my $self = shift;
 1880   $self->remove(1);
 1881 }
 1882 
 1883 # set the sequence number of the indicated item to a higher number
 1884 # than any other item in the heap, and bubble the item down to the
 1885 # bottom.
 1886 sub promote {
 1887   my ($self, $n) = @_;
 1888 #  $self->_check_loc($n);
 1889   $self->[$n][SEQ] = $self->_nseq;
 1890   my $i = $n;
 1891   while (1) {
 1892     my ($L, $R) = (2*$i, 2*$i+1);
 1893     my $dir;
 1894     last unless defined $self->[$L] || defined $self->[$R];
 1895     $dir = $R unless defined $self->[$L];
 1896     $dir = $L unless defined $self->[$R];
 1897     unless (defined $dir) {
 1898       $dir = $self->[$L][SEQ] < $self->[$R][SEQ] ? $L : $R;
 1899     }
 1900     @{$self}[$i, $dir] = @{$self}[$dir, $i];
 1901     for ($i, $dir) {
 1902       $self->[0][1]->_heap_move($self->[$_][KEY], $_) if defined $self->[$_];
 1903     }
 1904     $i = $dir;
 1905   }
 1906 }
 1907 
 1908 # Return item $n from the heap, promoting its LRU status
 1909 sub lookup {
 1910   my ($self, $n) = @_;
 1911 #  $self->_check_loc($n);
 1912   my $val = $self->[$n];
 1913   $self->promote($n);
 1914   $val->[DAT];
 1915 }
 1916 
 1917 
 1918 # Assign a new value for node $n, promoting it to the bottom of the heap
 1919 sub set_val {
 1920   my ($self, $n, $val) = @_;
 1921 #  $self->_check_loc($n);
 1922   my $oval = $self->[$n][DAT];
 1923   $self->[$n][DAT] = $val;
 1924   $self->promote($n);
 1925   return $oval;
 1926 }
 1927 
 1928 # The hash key has changed for an item;
 1929 # alter the heap's record of the hash key
 1930 sub rekey {
 1931   my ($self, $n, $new_key) = @_;
 1932 #  $self->_check_loc($n);
 1933   $self->[$n][KEY] = $new_key;
 1934 }
 1935 
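# Debugging aid: verify that heap slot $n is occupied.  The '1 ||' below
# short-circuits the test, so the check is currently disabled.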
 1936 sub _check_loc {
 1937   my ($self, $n) = @_;
 1938   unless (1 || defined $self->[$n]) {
 1939     confess "_check_loc($n) failed";
 1940   }
 1941 }
 1942 
 1943 BEGIN { *_ci_warn = \&Tie::File::_ci_warn }
 1944 
 1945 sub _check_integrity {
 1946   my $self = shift;
 1947   my $good = 1;
 1948   my %seq;
 1949 
 1950   unless (eval {$self->[0][1]->isa("Tie::File::Cache")}) {
 1951     _ci_warn "Element 0 of heap corrupt";
 1952     $good = 0;
 1953   }
 1954   $good = 0 unless $self->_satisfies_heap_condition(1);
 1955   for my $i (2 .. $#{$self}) {
 1956     my $p = int($i/2);          # index of parent node
 1957     if (defined $self->[$i] && ! defined $self->[$p]) {
 1958       _ci_warn "Element $i of heap defined, but parent $p isn't";
 1959       $good = 0;
 1960     }
 1961 
 1962     if (defined $self->[$i]) {
 1963       if ($seq{$self->[$i][SEQ]}) {
 1964         my $seq = $self->[$i][SEQ];
 1965         _ci_warn "Nodes $i and $seq{$seq} both have SEQ=$seq";
 1966         $good = 0;
 1967       } else {
 1968         $seq{$self->[$i][SEQ]} = $i;
 1969       }
 1970     }
 1971   }
 1972 
 1973   return $good;
 1974 }
 1975 
 1976 sub _satisfies_heap_condition {
 1977   my $self = shift;
 1978   my $n = shift || 1;
 1979   my $good = 1;
 1980   for (0, 1) {
 1981     my $c = $n*2 + $_;
 1982     next unless defined $self->[$c];
 1983     if ($self->[$n][SEQ] >= $self->[$c][SEQ]) {
 1984       _ci_warn "Node $n of heap does not predate node $c";
 1985       $good = 0 ;
 1986     }
 1987     $good = 0 unless $self->_satisfies_heap_condition($c);
 1988   }
 1989   return $good;
 1990 }
 1991 
 1992 # Return a list of all the keys, sorted by expiration order
 1993 sub expire_order {
 1994   my $self = shift;
 1995   my @nodes = sort {$a->[SEQ] <=> $b->[SEQ]} $self->_nodes;
 1996   map { $_->[KEY] } @nodes;
 1997 }
 1998 
 1999 sub _nodes {
 2000   my $self = shift;
 2001   my $i = shift || 1;
 2002   return unless defined $self->[$i];
 2003   ($self->[$i], $self->_nodes($i*2), $self->_nodes($i*2+1));
 2004 }
 2005 
 2006 "Cogito, ergo sum.";  # don't forget to return a true value from the file
 2007 
 2008 __END__
 2009 
 2010 =head1 NAME
 2011 
 2012 Tie::File - Access the lines of a disk file via a Perl array
 2013 
 2014 =head1 SYNOPSIS
 2015 
 2016  # This file documents Tie::File version 1.02
 2017  use Tie::File;
 2018 
 2019  tie @array, 'Tie::File', filename or die ...;
 2020 
 2021  $array[13] = 'blah';     # line 13 of the file is now 'blah'
 2022  print $array[42];        # display line 42 of the file
 2023 
 2024  $n_recs = @array;        # how many records are in the file?
 2025  $#array -= 2;            # chop two records off the end
 2026 
 2027 
 2028  for (@array) {
 2029    s/PERL/Perl/g;        # Replace PERL with Perl everywhere in the file
 2030  }
 2031 
 2032  # These are just like regular push, pop, unshift, shift, and splice
 2033  # Except that they modify the file in the way you would expect
 2034 
 2035  push @array, new recs...;
 2036  my $r1 = pop @array;
 2037  unshift @array, new recs...;
 2038  my $r2 = shift @array;
 2039  @old_recs = splice @array, 3, 7, new recs...;
 2040 
 2041  untie @array;            # all finished
 2042 
 2043 
 2044 =head1 DESCRIPTION
 2045 
 2046 C<Tie::File> represents a regular text file as a Perl array.  Each
 2047 element in the array corresponds to a record in the file.  The first
 2048 line of the file is element 0 of the array; the second line is element
 2049 1, and so on.
 2050 
 2051 The file is I<not> loaded into memory, so this will work even for
 2052 gigantic files.
 2053 
 2054 Changes to the array are reflected in the file immediately.
 2055 
 2056 Lazy people and beginners may now stop reading the manual.
 2057 
 2058 =head2 C<recsep>
 2059 
 2060 What is a 'record'?  By default, the meaning is the same as for the
 2061 C<E<lt>...E<gt>> operator: It's a string terminated by C<$/>, which is
 2062 probably C<"\n">.  (Minor exception: on DOS and Win32 systems, a
 2063 'record' is a string terminated by C<"\r\n">.)  You may change the
 2064 definition of "record" by supplying the C<recsep> option in the C<tie>
 2065 call:
 2066 
 2067     tie @array, 'Tie::File', $file, recsep => 'es';
 2068 
 2069 This says that records are delimited by the string C<es>.  If the file
 2070 contained the following data:
 2071 
 2072     Curse these pesky flies!\n
 2073 
 2074 then the C<@array> would appear to have four elements:
 2075 
 2076     "Curse th"
 2077     "e p"
 2078     "ky fli"
 2079     "!\n"
 2080 
 2081 An undefined value is not permitted as a record separator.  Perl's
 2082 special "paragraph mode" semantics (E<agrave> la C<$/ = "">) are not
 2083 emulated.
 2084 
 2085 Records read from the tied array do not have the record separator
 2086 string on the end; this is to allow
 2087 
 2088     $array[17] .= "extra";
 2089 
 2090 to work as expected.
 2091 
 2092 (See L<"autochomp">, below.)  Records stored into the array will have
 2093 the record separator string appended before they are written to the
 2094 file, if they don't have one already.  For example, if the record
 2095 separator string is C<"\n">, then the following two lines do exactly
 2096 the same thing:
 2097 
 2098     $array[17] = "Cherry pie";
 2099     $array[17] = "Cherry pie\n";
 2100 
 2101 The result is that the contents of line 17 of the file will be
 2102 replaced with "Cherry pie"; a newline character will separate line 17
 2103 from line 18.  This means that this code will do nothing:
 2104 
 2105     chomp $array[17];
 2106 
 2107 This is because the C<chomp>ed value will have the separator reattached
 2108 when it is written back to the file.  There is no way to create a file
 2109 whose trailing record separator string is missing.
 2110 
 2111 Inserting records that I<contain> the record separator string is not
 2112 supported by this module.  It will probably produce a reasonable
 2113 result, but what this result will be may change in a future version.
 2114 Use 'splice' to insert records or to replace one record with several.
 2115 
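For example, rather than assigning a single string that contains the
separator, you might use C<splice> to store two separate records (a
minimal sketch, using the ordinary tied-array interface):

    # replace record 5 with two records, instead of storing
    # "first half\nsecond half" into $array[5]
    splice @array, 5, 1, "first half", "second half";
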
 2116 =head2 C<autochomp>
 2117 
 2118 Normally, array elements have the record separator removed, so that if
 2119 the file contains the text
 2120 
 2121     Gold
 2122     Frankincense
 2123     Myrrh
 2124 
 2125 the tied array will appear to contain C<("Gold", "Frankincense",
 2126 "Myrrh")>.  If you set C<autochomp> to a false value, the record
 2127 separator will not be removed.  If the file above was tied with
 2128 
 2129     tie @gifts, "Tie::File", $gifts, autochomp => 0;
 2130 
 2131 then the array C<@gifts> would appear to contain C<("Gold\n",
 2132 "Frankincense\n", "Myrrh\n")>, or (on Win32 systems) C<("Gold\r\n",
 2133 "Frankincense\r\n", "Myrrh\r\n")>.
 2134 
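If you do disable C<autochomp>, you can always strip the separator
yourself where you need to; a minimal sketch, reusing the C<@gifts>
example above:

    tie @gifts, "Tie::File", $gifts, autochomp => 0 or die "Cannot tie: $!";
    my $first = $gifts[0];        # "Gold\n" -- separator still attached
    chomp(my $bare = $first);     # remove it by hand when you want to
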
 2135 =head2 C<mode>
 2136 
 2137 Normally, the specified file will be opened for read and write access,
 2138 and will be created if it does not exist.  (That is, the flags
 2139 C<O_RDWR | O_CREAT> are supplied in the C<open> call.)  If you want to
 2140 change this, you may supply alternative flags in the C<mode> option.
 2141 See L<Fcntl> for a listing of available flags.
 2142 For example:
 2143 
 2144     # open the file if it exists, but fail if it does not exist
 2145     use Fcntl 'O_RDWR';
 2146     tie @array, 'Tie::File', $file, mode => O_RDWR;
 2147 
 2148     # create the file if it does not exist
 2149     use Fcntl 'O_RDWR', 'O_CREAT';
 2150     tie @array, 'Tie::File', $file, mode => O_RDWR | O_CREAT;
 2151 
 2152     # open an existing file in read-only mode
 2153     use Fcntl 'O_RDONLY';
 2154     tie @array, 'Tie::File', $file, mode => O_RDONLY;
 2155 
 2156 Opening the data file in write-only or append mode is not supported.
 2157 
 2158 =head2 C<memory>
 2159 
 2160 This is an upper limit on the amount of memory that C<Tie::File> will
 2161 consume at any time while managing the file.  This is used for two
 2162 things: managing the I<read cache> and managing the I<deferred write
 2163 buffer>.
 2164 
 2165 Records read in from the file are cached, to avoid having to re-read
 2166 them repeatedly.  If you read the same record twice, the first time it
 2167 will be stored in memory, and the second time it will be fetched from
 2168 the I<read cache>.  The amount of data in the read cache will not
 2169 exceed the value you specified for C<memory>.  If C<Tie::File> wants
 2170 to cache a new record, but the read cache is full, it will make room
 2171 by expiring the least-recently visited records from the read cache.
 2172 
 2173 The default memory limit is 2 MiB.  You can adjust the maximum read
 2174 cache size by supplying the C<memory> option.  The argument is the
 2175 desired cache size, in bytes.
 2176 
 2177  # I have a lot of memory, so use a large cache to speed up access
 2178  tie @array, 'Tie::File', $file, memory => 20_000_000;
 2179 
 2180 Setting the memory limit to 0 will inhibit caching; records will be
 2181 fetched from disk every time you examine them.
 2182 
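For example (a sketch; mainly useful when each record is examined only
once, so caching would not help anyway):

    # disable the read cache entirely; every access re-reads the file
    tie @array, 'Tie::File', $file, memory => 0 or die "Cannot tie: $!";
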
 2183 The C<memory> value is not an absolute or exact limit on the memory
 2184 used.  C<Tie::File> objects contain some structures besides the read
 2185 cache and the deferred write buffer, whose sizes are not charged
 2186 against C<memory>. 
 2187 
 2188 The cache itself consumes about 310 bytes per cached record, so if
 2189 your file has many short records, you may want to decrease the cache
 2190 memory limit, or else the cache overhead may exceed the size of the
 2191 cached data.
 2192 
 2193 
 2194 =head2 C<dw_size>
 2195 
 2196 (This is an advanced feature.  Skip this section on first reading.)
 2197 
 2198 If you use deferred writing (See L<"Deferred Writing">, below) then
 2199 data you write into the array will not be written directly to the
 2200 file; instead, it will be saved in the I<deferred write buffer> to be
 2201 written out later.  Data in the deferred write buffer is also charged
 2202 against the memory limit you set with the C<memory> option.
 2203 
 2204 You may set the C<dw_size> option to limit the amount of data that can
 2205 be saved in the deferred write buffer.  This limit may not exceed the
 2206 total memory limit.  For example, if you set C<dw_size> to 1000 and
 2207 C<memory> to 2500, that means that no more than 1000 bytes of deferred
 2208 writes will be saved up.  The space available for the read cache will
 2209 vary, but it will always be at least 1500 bytes (if the deferred write
 2210 buffer is full) and it could grow as large as 2500 bytes (if the
 2211 deferred write buffer is empty.)
 2212 
 2213 If you don't specify a C<dw_size>, it defaults to the entire memory
 2214 limit.
 2215 
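A sketch of the 1000/2500 split described above:

    # at most 1000 bytes of deferred writes; the read cache gets whatever
    # is left of the 2500-byte total at any given moment
    tie @array, 'Tie::File', $file, memory => 2500, dw_size => 1000
      or die "Cannot tie $file: $!";
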
 2216 =head2 Option Format
 2217 
 2218 C<-mode> is a synonym for C<mode>.  C<-recsep> is a synonym for
 2219 C<recsep>.  C<-memory> is a synonym for C<memory>.  You get the
 2220 idea.
 2221 
 2222 =head1 Public Methods
 2223 
 2224 The C<tie> call returns an object, say C<$o>.  You may call
 2225 
 2226     $rec = $o->FETCH($n);
 2227     $o->STORE($n, $rec);
 2228 
 2229 to fetch or store the record at line C<$n>, respectively; similarly
 2230 the other tied array methods.  (See L<perltie> for details.)  You may
 2231 also call the following methods on this object:
 2232 
 2233 =head2 C<flock>
 2234 
 2235     $o->flock(MODE)
 2236 
 2237 will lock the tied file.  C<MODE> has the same meaning as the second
 2238 argument to the Perl built-in C<flock> function; for example
 2239 C<LOCK_SH> or C<LOCK_EX | LOCK_NB>.  (These constants are provided by
 2240 the C<use Fcntl ':flock'> declaration.)
 2241 
 2242 C<MODE> is optional; the default is C<LOCK_EX>.
 2243 
 2244 C<Tie::File> maintains an internal table of the byte offset of each
 2245 record it has seen in the file.  
 2246 
 2247 When you use C<flock> to lock the file, C<Tie::File> assumes that the
 2248 read cache is no longer trustworthy, because another process might
 2249 have modified the file since the last time it was read.  Therefore, a
 2250 successful call to C<flock> discards the contents of the read cache
 2251 and the internal record offset table.
 2252 
 2253 C<Tie::File> promises that the following sequence of operations will
 2254 be safe:
 2255 
 2256     my $o = tie @array, "Tie::File", $filename;
 2257     $o->flock;
 2258 
 2259 In particular, C<Tie::File> will I<not> read or write the file during
 2260 the C<tie> call.  (Exception: Using C<mode =E<gt> O_TRUNC> will, of
 2261 course, erase the file during the C<tie> call.  If you want to do this
 2262 safely, then open the file without C<O_TRUNC>, lock the file, and use
 2263 C<@array = ()>.)
 2264 
 2265 The best way to unlock a file is to discard the object and untie the
 2266 array.  It is probably unsafe to unlock the file without also untying
 2267 it, because if you do, changes may remain unwritten inside the object.
 2268 That is why there is no shortcut for unlocking.  If you really want to
 2269 unlock the file prematurely, you know what to do; if you don't know
 2270 what to do, then don't do it.
 2271 
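Putting this advice together, a minimal locking sketch looks like this
(the C<or die> check is ordinary error handling, nothing specific to
C<Tie::File>):

    use Fcntl ':flock';
    my $o = tie @array, 'Tie::File', $filename or die "Cannot tie: $!";
    $o->flock(LOCK_EX);            # lock before reading or writing
    push @array, "a new record";   # ... work with the file ...
    undef $o;                      # discard the object,
    untie @array;                  #   then untie; this also releases the lock
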
 2272 All the usual warnings about file locking apply here.  In particular,
 2273 note that file locking in Perl is B<advisory>, which means that
 2274 holding a lock will not prevent anyone else from reading, writing, or
 2275 erasing the file; it only prevents them from getting another lock at
 2276 the same time.  Locks are analogous to green traffic lights: If you
 2277 have a green light, that does not prevent the idiot coming the other
 2278 way from plowing into you sideways; it merely guarantees to you that
 2279 the idiot does not also have a green light at the same time.
 2280 
 2281 =head2 C<autochomp>
 2282 
 2283     my $old_value = $o->autochomp(0);    # disable autochomp option
 2284     my $old_value = $o->autochomp(1);    #  enable autochomp option
 2285 
 2286     my $ac = $o->autochomp();   # recover current value
 2287 
 2288 See L<"autochomp">, above.
 2289 
 2290 =head2 C<defer>, C<flush>, C<discard>, and C<autodefer>
 2291 
 2292 See L<"Deferred Writing">, below.
 2293 
 2294 =head2 C<offset>
 2295 
 2296     $off = $o->offset($n);
 2297 
 2298 This method returns the byte offset of the start of the C<$n>th record
 2299 in the file.  If there is no such record, it returns an undefined
 2300 value.
 2301 
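For instance (a sketch):

    my $o = tied @array;
    my $pos = $o->offset(10);      # where does record 10 begin?
    print "record 10 starts at byte $pos\n" if defined $pos;
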
 2302 =head1 Tying to an already-opened filehandle
 2303 
 2304 If C<$fh> is a filehandle, such as is returned by C<IO::File> or one
 2305 of the other C<IO> modules, you may use:
 2306 
 2307     tie @array, 'Tie::File', $fh, ...;
 2308 
 2309 Similarly if you opened that handle C<FH> with regular C<open> or
 2310 C<sysopen>, you may use:
 2311 
 2312     tie @array, 'Tie::File', \*FH, ...;
 2313 
 2314 Handles that were opened write-only won't work.  Handles that were
 2315 opened read-only will work as long as you don't try to modify the
 2316 array.  Handles must be attached to seekable sources of data---that
 2317 means no pipes or sockets.  If C<Tie::File> can detect that you
 2318 supplied a non-seekable handle, the C<tie> call will throw an
 2319 exception.  (On Unix systems, it can detect this.)
 2320 
 2321 Note that C<Tie::File> will close only those filehandles that it opened
 2322 internally.  If you passed it a filehandle as above, you "own" the
 2323 filehandle, and are responsible for closing it after you have untied
 2324 the @array.
 2325 
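For example, a sketch using C<IO::File> (any seekable read-write handle
would do):

    use IO::File;
    my $fh = IO::File->new($filename, '+<') or die "Cannot open $filename: $!";
    tie @array, 'Tie::File', $fh or die "Cannot tie: $!";
    # ... use @array ...
    untie @array;
    $fh->close;                    # you opened the handle, so you close it
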
 2326 =head1 Deferred Writing
 2327 
 2328 (This is an advanced feature.  Skip this section on first reading.)
 2329 
 2330 Normally, modifying a C<Tie::File> array writes to the underlying file
 2331 immediately.  Every assignment like C<$a[3] = ...> rewrites as much of
 2332 the file as is necessary; typically, everything from line 3 through
 2333 the end will need to be rewritten.  This is the simplest and most
 2334 transparent behavior.  Performance even for large files is reasonably
 2335 good.
 2336 
 2337 However, under some circumstances, this behavior may be excessively
 2338 slow.  For example, suppose you have a million-record file, and you
 2339 want to do:
 2340 
 2341     for (@FILE) {
 2342       $_ = "> $_";
 2343     }
 2344 
 2345 The first time through the loop, you will rewrite the entire file,
 2346 from line 0 through the end.  The second time through the loop, you
 2347 will rewrite the entire file from line 1 through the end.  The third
 2348 time through the loop, you will rewrite the entire file from line 2 to
 2349 the end.  And so on.
 2350 
 2351 If the performance in such cases is unacceptable, you may defer the
 2352 actual writing, and then have it done all at once.  The following loop
 2353 will perform much better for large files:
 2354 
 2355     (tied @a)->defer;
 2356     for (@a) {
 2357       $_ = "> $_";
 2358     }
 2359     (tied @a)->flush;
 2360 
 2361 If C<Tie::File>'s memory limit is large enough, all the writing will
 2362 be done in memory.  Then, when you call C<-E<gt>flush>, the entire file
 2363 will be rewritten in a single pass.
 2364 
 2365 (Actually, the preceding discussion is something of a fib.  You don't
 2366 need to enable deferred writing to get good performance for this
 2367 common case, because C<Tie::File> will do it for you automatically
 2368 unless you specifically tell it not to.  See L<"Autodeferring">,
 2369 below.)
 2370 
 2371 Calling C<-E<gt>flush> returns the array to immediate-write mode.  If
 2372 you wish to discard the deferred writes, you may call C<-E<gt>discard>
 2373 instead of C<-E<gt>flush>.  Note that in some cases, some of the data
 2374 will have been written already, and it will be too late for
 2375 C<-E<gt>discard> to discard all the changes.  Support for
 2376 C<-E<gt>discard> may be withdrawn in a future version of C<Tie::File>.
 2377 
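A sketch of deferring a batch of changes and keeping an explicit
bail-out path; C<$ok> here is a stand-in for whatever success test your
program uses:

    my $o = tied @a;
    $o->defer;
    $_ = "> $_" for @a;        # changes pile up in the deferred-write buffer
    if ($ok) {                 # $ok: your own success criterion (hypothetical)
        $o->flush;             # write everything out
    } else {
        $o->discard;           # drop the buffered changes (see caveat above)
    }
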
 2378 Deferred writes are cached in memory up to the limit specified by the
 2379 C<dw_size> option (see above).  If the deferred-write buffer is full
 2380 and you try to write still more deferred data, the buffer will be
 2381 flushed.  All buffered data will be written immediately, the buffer
 2382 will be emptied, and the now-empty space will be used for future
 2383 deferred writes.
 2384 
 2385 If the deferred-write buffer isn't yet full, but the total size of the
 2386 buffer and the read cache would exceed the C<memory> limit, the oldest
 2387 records will be expired from the read cache until the total size is
 2388 under the limit.
 2389 
 2390 C<push>, C<pop>, C<shift>, C<unshift>, and C<splice> cannot be
 2391 deferred.  When you perform one of these operations, any deferred data
 2392 is written to the file and the operation is performed immediately.
 2393 This may change in a future version.
 2394 
 2395 If you resize the array with deferred writing enabled, the file will
 2396 be resized immediately, but deferred records will not be written.
 2397 This has a surprising consequence: C<@a = (...)> erases the file
 2398 immediately, but the writing of the actual data is deferred.  This
 2399 might be a bug.  If it is a bug, it will be fixed in a future version.
 2400 
 2401 =head2 Autodeferring
 2402 
 2403 C<Tie::File> tries to guess when deferred writing might be helpful,
 2404 and to turn it on and off automatically. 
 2405 
 2406     for (@a) {
 2407       $_ = "> $_";
 2408     }
 2409 
 2410 In this example, only the first two assignments will be done
 2411 immediately; after this, all the changes to the file will be deferred
 2412 up to the user-specified memory limit.
 2413 
 2414 You should usually be able to ignore this and just use the module
 2415 without thinking about deferring.  However, special applications may
 2416 require fine control over which writes are deferred, or may require
 2417 that all writes be immediate.  To disable the autodeferment feature,
 2418 use
 2419 
 2420     (tied @array)->autodefer(0);
 2421 
 2422 or
 2423 
 2424     tie @array, 'Tie::File', $file, autodefer => 0;
 2425 
 2426 
 2427 Similarly, C<-E<gt>autodefer(1)> re-enables autodeferment, and 
 2428 C<-E<gt>autodefer()> recovers the current value of the autodefer setting.
 2429 
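For example, a sketch of taking manual control for a single bulk
update:

    my $o = tied @array;
    $o->autodefer(0);                      # no automatic guessing
    $o->defer;                             # defer exactly this batch of writes
    $array[$_] = "record $_" for 0 .. 99;
    $o->flush;                             # write them out, back to immediate mode
    $o->autodefer(1);                      # hand control back to the module
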
 2430 
 2431 =head1 CONCURRENT ACCESS TO FILES
 2432 
 2433 Caching and deferred writing are inappropriate if you want the same
 2434 file to be accessed simultaneously from more than one process.  Other
 2435 optimizations performed internally by this module are also
 2436 incompatible with concurrent access.  A future version of this module will
 2437 support a C<concurrent =E<gt> 1> option that enables safe concurrent access.
 2438 
 2439 Previous versions of this documentation suggested using C<memory
 2440 =E<gt> 0> for safe concurrent access.  This was mistaken.  C<Tie::File>
 2441 does not yet support safe concurrent access.
 2442 
 2443 =head1 CAVEATS
 2444 
 2445 (That's Latin for 'warnings'.)
 2446 
 2447 =over 4
 2448 
 2449 =item *
 2450 
 2451 Reasonable effort was made to make this module efficient.  Nevertheless,
 2452 changing the size of a record in the middle of a large file will
 2453 always be fairly slow, because everything after the new record must be
 2454 moved.
 2455 
 2456 =item *
 2457 
 2458 The behavior of tied arrays is not precisely the same as for regular
 2459 arrays.  For example:
 2460 
 2461     # This DOES print "How unusual!"
 2462     undef $a[10];  print "How unusual!\n" if defined $a[10];
 2463 
 2464 C<undef>-ing a C<Tie::File> array element just blanks out the
 2465 corresponding record in the file.  When you read it back again, you'll
 2466 get the empty string, so the supposedly-C<undef>'ed value will be
 2467 defined.  Similarly, if you have C<autochomp> disabled, then
 2468 
 2469     # This DOES print "How unusual!" if 'autochomp' is disabled
 2470     undef $a[10];
 2471     print "How unusual!\n" if $a[10];
 2472 
 2473 This is because, when C<autochomp> is disabled, C<$a[10]> will read back
 2474 as C<"\n"> (or whatever the record separator string is).
 2475 
 2476 There are other minor differences, particularly regarding C<exists>
 2477 and C<delete>, but in general, the correspondence is extremely close.
 2478 
 2479 =item *
 2480 
 2481 I have supposed that since this module is concerned with file I/O,
 2482 almost all normal use of it will be heavily I/O bound.  This means
 2483 that the time to maintain complicated data structures inside the
 2484 module will be dominated by the time to actually perform the I/O.
 2485 When there was an opportunity to spend CPU time to avoid doing I/O, I
 2486 usually tried to take it.
 2487 
 2488 =item *
 2489 
 2490 You might be tempted to think that deferred writing is like
 2491 transactions, with C<flush> as C<commit> and C<discard> as
 2492 C<rollback>, but it isn't, so don't.
 2493 
 2494 =item *
 2495 
 2496 There is a large memory overhead for each record offset and for each
 2497 cache entry: about 310 bytes per cached data record, and about 21 bytes
 2498 per offset table entry.
 2499 
 2500 The per-record overhead will limit the maximum number of records you
 2501 can access per file. Note that I<accessing> the length of the array
 2502 via C<$x = scalar @tied_file> accesses B<all> records and stores their
 2503 offsets.  The same is true for C<foreach (@tied_file)>, even if you exit
 2504 the loop early.
 2505 
 2506 =back
 2507 
 2508 =head1 SUBCLASSING
 2509 
 2510 This version promises absolutely nothing about the internals, which
 2511 may change without notice.  A future version of the module will have a
 2512 well-defined and stable subclassing API.
 2513 
 2514 =head1 WHAT ABOUT C<DB_File>?
 2515 
 2516 People sometimes point out that L<DB_File> will do something similar,
 2517 and ask why the C<Tie::File> module is necessary.
 2518 
 2519 There are a number of reasons that you might prefer C<Tie::File>.
 2520 A list is available at C<http://perl.plover.com/TieFile/why-not-DB_File>.
 2521 
 2522 =head1 AUTHOR
 2523 
 2524 Mark Jason Dominus
 2525 
 2526 To contact the author, send email to: C<mjd-perl-tiefile+@plover.com>
 2527 
 2528 To receive an announcement whenever a new version of this module is
 2529 released, send a blank email message to
 2530 C<mjd-perl-tiefile-subscribe@plover.com>.
 2531 
 2532 The most recent version of this module, including documentation and
 2533 any news of importance, will be available at
 2534 
 2535     http://perl.plover.com/TieFile/
 2536 
 2537 
 2538 =head1 LICENSE
 2539 
 2540 C<Tie::File> version 1.02 is copyright (C) 2003 Mark Jason Dominus.
 2541 
 2542 This library is free software; you may redistribute it and/or modify
 2543 it under the same terms as Perl itself.
 2544 
 2545 These terms are your choice of any of (1) the Perl Artistic Licence,
 2546 or (2) version 2 of the GNU General Public License as published by the
 2547 Free Software Foundation, or (3) any later version of the GNU General
 2548 Public License.
 2549 
 2550 This library is distributed in the hope that it will be useful,
 2551 but WITHOUT ANY WARRANTY; without even the implied warranty of
 2552 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 2553 GNU General Public License for more details.
 2554 
 2555 You should have received a copy of the GNU General Public License
 2556 along with this library program; it should be in the file C<COPYING>.
 2557 If not, write to the Free Software Foundation, Inc., 51 Franklin Street,
 2558 Fifth Floor, Boston, MA  02110-1301, USA
 2559 
 2560 For licensing inquiries, contact the author at:
 2561 
 2562     Mark Jason Dominus
 2563     255 S. Warnock St.
 2564     Philadelphia, PA 19107
 2565 
 2566 =head1 WARRANTY
 2567 
 2568 C<Tie::File> version 1.02 comes with ABSOLUTELY NO WARRANTY.
 2569 For details, see the license.
 2570 
 2571 =head1 THANKS
 2572 
 2573 Gigantic thanks to Jarkko Hietaniemi, for agreeing to put this in the
 2574 core when I hadn't written it yet, and for generally being helpful,
 2575 supportive, and competent.  (Usually the rule is "choose any one.")
 2576 Also big thanks to Abhijit Menon-Sen for all of the same things.
 2577 
 2578 Special thanks to Craig Berry and Peter Prymmer (for VMS portability
 2579 help), Randy Kobes (for Win32 portability help), Clinton Pierce and
 2580 Autrijus Tang (for heroic eleventh-hour Win32 testing above and beyond
 2581 the call of duty), Michael G Schwern (for testing advice), and the
 2582 rest of the CPAN testers (for testing generally).
 2583 
 2584 Special thanks to Tels for suggesting several speed and memory
 2585 optimizations.
 2586 
 2587 Additional thanks to:
 2588 Edward Avis /
 2589 Mattia Barbon /
 2590 Tom Christiansen /
 2591 Gerrit Haase /
 2592 Gurusamy Sarathy /
 2593 Jarkko Hietaniemi (again) /
 2594 Nikola Knezevic /
 2595 John Kominetz /
 2596 Nick Ing-Simmons /
 2597 Tassilo von Parseval /
 2598 H. Dieter Pearcey /
 2599 Slaven Rezic /
 2600 Eric Roode /
 2601 Peter Scott /
 2602 Peter Somu /
 2603 Autrijus Tang (again) /
 2604 Tels (again) /
 2605 Juerd Waalboer /
 2606 Todd Rinaldo
 2607 
 2608 =head1 TODO
 2609 
 2610 More tests.  (Stuff I didn't think of yet.)
 2611 
 2612 Paragraph mode?
 2613 
 2614 Fixed-length mode.  Leave-blanks mode.
 2615 
 2616 Maybe an autolocking mode?
 2617 
 2618 For many common uses of the module, the read cache is a liability.
 2619 For example, a program that inserts a single record, or that scans the
 2620 file once, will have a cache hit rate of zero.  This suggests a major
 2621 optimization: The cache should be initially disabled.  Here's a hybrid
 2622 approach: Initially, the cache is disabled, but the cache code
 2623 maintains statistics about how high the hit rate would be *if* it were
 2624 enabled.  When it sees the hit rate get high enough, it enables
 2625 itself.  The STAT comments in this code are the beginning of an
 2626 implementation of this.
 2627 
 2628 Record locking with fcntl()?  Then the module might support an undo
 2629 log and get real transactions.  What a tour de force that would be.
 2630 
 2631 Keeping track of the highest cached record.  This would let reads-in-a-row
 2632 skip the cache lookup (if reading from 1..N with an empty cache at the
 2633 start, the last cached value will always be N-1).
 2634 
 2635 More tests.
 2636 
 2637 =cut
 2638