use strict;
use Text::Wrap;
use Time::Local;
use File::Basename;

# Command used to obtain the raw log data; parse_options() may append
# log options, insert global cvs options, or restrict it to a branch.
my $Log_Source_Command = "cvs log";
# -B/--branch: show only revisions directly on this branch.
my $Only_Branch = undef;

# Version string, extracted from the RCS Revision keyword below.
my $VERSION = '$Revision: 1.1 $';
$VERSION =~ s/\S+\s+(\S+)\s+\S+/$1/;

# Option globals, set by parse_options():
my $Debug = 0;                   # --debug: trace progress on STDERR
my $Print_Version = 0;           # --version
my $Print_Usage = 0;             # -h/--help/--usage/-?
my $Distributed = 0;             # -d: one ChangeLog per subdirectory
my $Log_File_Name = "ChangeLog"; # output name; -f/--file overrides
my $User_Map_File = "";          # -U/--usermap: username expansion file
my $Output_To_Stdout = 0;        # --stdout
my $Prune_Empty_Msgs = 0;        # -P/--prune: drop empty log messages
my $No_Wrap = 0;                 # --no-wrap
my $After_Header = " ";          # header/message separator (-S => "\n\n")
my $XML_Output = 0;              # --xml
my $UTC_Times = 0;               # --gmt/--utc
my $Show_Day_Of_Week = 0;        # -w/--day-of-week
my $Show_Revisions = 0;          # -r/--revisions
my $Show_Tags = 0;               # -t/--tags
my $Show_Branches = 0;           # -b/--branches
my @Follow_Branches;             # -F/--follow (may repeat)
my @Ignore_Files;                # -I/--ignore regexps (may repeat)
my $Case_Insensitive = 0;        # -C/--case-insensitive
my $Regexp_Gate = "";            # -R/--regexp entry filter
my $Global_Opts = "";            # NOTE(review): appears unused in this file
my $Command_Opts = "";           # NOTE(review): appears unused in this file
my $Input_From_Stdin = 0;        # --stdin: read log data from stdin
my $Hide_Filenames = 0;          # --hide-filenames
my $Max_Checkin_Duration = 180;  # -W/--window, in seconds
my $ChangeLog_Header = "";       # contents of --header FILE

# Separator lines used by "cvs log" between files and between revisions.
my $file_separator = "======================================="
    . "======================================";
my $logmsg_separator = "----------------------------";

# Main program: parse command line, then do all the work.
&parse_options ();
&derive_change_log ();
# Run the log command (or read stdin), parse every revision of every
# file into one big nested hash, then group entries and write out the
# ChangeLog(s) -- one per directory with --distributed, else one file
# or stdout.
sub derive_change_log ()
{
    # $grand_poobah{dir}{author}{time}{msg} => listref of file-record hashes.
    my %grand_poobah;

    # Parse state for the revision currently being read.
    my $file_full_path;
    my $time;
    my $revision;
    my $author;
    my $msg_txt;
    my $detected_file_separator;

    # Username -> pretty-name expansions from the -U/--usermap file.
    my %usermap;

    # State for the "symbolic names:" section of each file's log.
    my $collecting_symbolic_names = 0;
    my %symbolic_names; my %branch_names; my %branch_numbers; my @branch_roots; my $found_branch = 0;

    if (defined $Only_Branch) {
        $Log_Source_Command .= " -r$Only_Branch";
    }

    if (! $Input_From_Stdin) {
        warn "running '$Log_Source_Command'\n";
        open (LOG_SOURCE, "$Log_Source_Command |")
            or die "unable to run \"${Log_Source_Command}\"";
    }
    else {
        open (LOG_SOURCE, "-") or die "unable to open stdin for reading";
    }

    %usermap = &maybe_read_user_map_file ();

    while (<LOG_SOURCE>)
    {
        # A new file's log section starts with "Working file: PATH".
        if ((! (defined $file_full_path)) and /^Working file: (.*)/) {
            $file_full_path = $1;
            # Undefine the path again if the file matches an --ignore regexp;
            # an undefined path makes the rest of this section get skipped.
            if (@Ignore_Files) {
                my $base;
                ($base, undef, undef) = fileparse ($file_full_path);
                if ($Case_Insensitive) {
                    if (grep ($file_full_path =~ m|$_|i, @Ignore_Files)) {
                        undef $file_full_path;
                    }
                }
                elsif (grep ($file_full_path =~ m|$_|, @Ignore_Files)) {
                    undef $file_full_path;
                }
            }
            $found_branch = 0;
            next;
        }

        # No current file means we're inside an ignored file's section.
        next if (! $file_full_path);

        if (/^symbolic names:$/) {
            $collecting_symbolic_names = 1;
            next;
        }

        if ($collecting_symbolic_names)
        {
            # The tag list ends at the first non-indented line.
            if (/^\S/) {
                $collecting_symbolic_names = 0;
            }
            else {
                /^\s+([^:]+): ([\d.]+)$/;
                my $tag_name = $1;
                my $tag_rev = $2;

                # Decide whether this tag names a branch rather than a
                # plain revision: either an odd-component revision number,
                # or a CVS "magic branch" with a ".0." next-to-last field.
                my $real_branch_rev = "";
                if (($tag_rev =~ /^(\d+\.\d+\.)+\d+$/) and (! ($tag_rev =~ /^(1\.)+1$/))) {
                    $real_branch_rev = $tag_rev;
                }
                elsif ($tag_rev =~ /(\d+\.(\d+\.)+)0.(\d+)/) {
                    # Convert the magic form "X.Y.0.Z" to the real "X.Y.Z".
                    $real_branch_rev = $1 . $3;
                }
                if ($real_branch_rev)
                {
                    $branch_names{$real_branch_rev} = $tag_name;
                    # Remember branch numbers for -F/--follow matching.
                    if (@Follow_Branches) {
                        if (grep ($_ eq $tag_name, @Follow_Branches)) {
                            $branch_numbers{$tag_name} = $real_branch_rev;
                        }
                    }
                    if (defined $Only_Branch) {
                        if ($tag_name eq $Only_Branch) {
                            $found_branch = 1;
                        }
                    }
                }
                else {
                    # Plain tag: index by revision for later attachment.
                    push (@{$symbolic_names{$tag_rev}}, $tag_name);
                }
            }
        }

        # "revision N.N[.N...]" begins a new revision record.
        if ((! (defined $revision)) and (/^revision (\d+\.[\d.]+)/))
        {
            $revision = $1;

            if (@Follow_Branches)
            {
                # Keep this revision only if it lies on, or is ancestral
                # to, one of the followed branches.
                foreach my $branch (@Follow_Branches)
                {
                    # "-F trunk": top-level revisions have two components.
                    if (($branch =~ /^trunk$/i) and ($revision =~ /^[0-9]+\.[0-9]+$/))
                    {
                        goto dengo;
                    }

                    my $branch_number = $branch_numbers{$branch};
                    if ($branch_number)
                    {
                        # Directly on the followed branch?
                        if ((substr ($revision, 0, ((length ($branch_number)) + 1)))
                            eq ($branch_number . "."))
                        {
                            goto dengo;
                        }
                        # Or an ancestor of the branch's root?
                        elsif ((length ($branch_number)) > (length ($revision)))
                        {
                            $revision =~ /^((?:\d+\.)+)(\d+)$/;
                            my $r_left = $1; my $r_end = $2;
                            $branch_number =~ /^((?:\d+\.)+)(\d+)\.\d+$/;
                            my $b_left = $1; my $b_mid = $2;
                            if (($r_left eq $b_left)
                                && ($r_end <= $b_mid))
                            {
                                goto dengo;
                            }
                        }
                    }
                }
            }
            else {
                # Not following branches: accept every revision.
                next;
            }

            # No followed branch claimed this revision; discard it.
            undef $revision;

      dengo:
            next;
        }

        # Between revisions: only watch for the file-separator line.
        if (! (defined ($revision))) {
            $detected_file_separator = /^$file_separator$/o;
            if ($detected_file_separator) {
                goto CLEAR;
            }
            else {
                next;
            }
        }

        # Next comes the "date: ...;  author: ...;" line.
        unless (defined $time) {
            if (/^date: .*/)
            {
                ($time, $author) = &parse_date_and_author ($_);
                # Apply any -U/--usermap expansion for this author.
                if (defined ($usermap{$author}) and $usermap{$author}) {
                    $author = $usermap{$author};
                }
            }
            else {
                $detected_file_separator = /^$file_separator$/o;
                if ($detected_file_separator) {
                    goto CLEAR;
                }
            }
            # The date line itself is never part of the log message.
            next;
        }

        # Optional "branches: ...;" line listing branches rooted here.
        if (/^branches:\s+(.*);$/)
        {
            if ($Show_Branches)
            {
                my $lst = $1;
                $lst =~ s/(1\.)+1;|(1\.)+1$//; # ignore the trivial branch 1.1.1
                if ($lst) {
                    @branch_roots = split (/;\s+/, $lst);
                }
                else {
                    undef @branch_roots;
                }
                next;
            }
            else
            {
                next;
            }
        }

        # Everything else up to a separator accumulates as message text.
        $detected_file_separator = /^$file_separator$/o;
        if ($detected_file_separator && ! (defined $revision)) {
            goto CLEAR;
        }
        unless ($detected_file_separator || /^$logmsg_separator$/o)
        {
            $msg_txt .= $_; next;
        }

        # A separator ends this revision record.  Normalize empty or
        # placeholder messages first.
        if ((! $msg_txt)
            || ($msg_txt =~ /^\s*\.\s*$|^\s*$/)
            || ($msg_txt =~ /\*\*\* empty log message \*\*\*/)) {
            if ($Prune_Empty_Msgs) {
                goto CLEAR;
            }
            $msg_txt = "[no log message]\n";
        }

        # File the completed revision record ("qunk") away.
        {
            my $dir_key; my %qunk;

            if ($Distributed) {
                # Each subdirectory gets its own ChangeLog.
                ($qunk{'filename'}, $dir_key, undef) = fileparse ($file_full_path);
            }
            else {
                $dir_key = "./";
                $qunk{'filename'} = $file_full_path;
            }

            $qunk{'time'} = $time;
            $qunk{'revision'} = $revision;

            # The revision minus its final component names the branch.
            $qunk{'revision'} =~ /((?:\d+\.)+)\d+/;
            my $branch_prefix = $1;
            $branch_prefix =~ s/\.$//; # strip off final dot
            if ($branch_names{$branch_prefix}) {
                $qunk{'branch'} = $branch_names{$branch_prefix};
            }

            if (@branch_roots) {
                my @roots = map { $branch_names{$_} } @branch_roots;
                $qunk{'branchroots'} = \@roots;
            }

            if (defined $Only_Branch) {
                if (!$found_branch) { goto CLEAR; }
            }

            # Attach (and consume) any symbolic tags for this revision.
            if (defined ($symbolic_names{$revision})) {
                $qunk{'tags'} = $symbolic_names{$revision};
                delete $symbolic_names{$revision};
            }

            &debug ("(pushing log msg for ${dir_key}$qunk{'filename'})\n");

            push (@{$grand_poobah{$dir_key}{$author}{$time}{$msg_txt}}, \%qunk);
        }

      CLEAR:
        # Reset per-revision state; per-file state too after a separator.
        undef $msg_txt;
        undef $time;
        undef $revision;
        undef $author;
        undef @branch_roots;
        if ($detected_file_separator) {
            undef $file_full_path;
            undef %branch_names;
            undef %branch_numbers;
        }
    }
    close (LOG_SOURCE);

    # Output phase: one pass per directory collected above.
    while (my ($dir,$authorhash) = each %grand_poobah)
    {
        &debug ("DOING DIR: $dir\n");

        # %changelog merges checkins by the same author with the same
        # message that fall within $Max_Checkin_Duration seconds.
        my %changelog;

        while (my ($author,$timehash) = each %$authorhash)
        {
            my $lasttime;
            my %stamptime;
            foreach my $time (sort {$main::a <=> $main::b} (keys %$timehash))
            {
                my $msghash = $timehash->{$time};
                while (my ($msg,$qunklist) = each %$msghash)
                {
                    my $stamptime = $stamptime{$msg};
                    if ((defined $stamptime)
                        and (($time - $stamptime) < $Max_Checkin_Duration)
                        and (defined $changelog{$stamptime}{$author}{$msg}))
                    {
                        # Same message within the window: merge file lists.
                        push(@{$changelog{$stamptime}{$author}{$msg}}, @$qunklist);
                    }
                    else {
                        $changelog{$time}{$author}{$msg} = $qunklist;
                        $stamptime{$msg} = $time;
                    }
                }
            }
        }
        undef (%$authorhash);

        # Open the output: a temp file beside the ChangeLog, or stdout.
        my ($logfile_here, $logfile_bak, $tmpfile);
        if (! $Output_To_Stdout) {
            $logfile_here = $dir . $Log_File_Name;
            $logfile_here =~ s/^\.\/\//\//; # fix any leading ".//" problem
            $tmpfile = "${logfile_here}.cvs2cl$$.tmp";
            $logfile_bak = "${logfile_here}.bak";
            open (LOG_OUT, ">$tmpfile") or die "Unable to open \"$tmpfile\"";
        }
        else {
            open (LOG_OUT, ">-") or die "Unable to open stdout for writing";
        }

        print LOG_OUT $ChangeLog_Header;

        if ($XML_Output) {
            print LOG_OUT "<?xml version=\"1.0\"?>\n\n"
                . "<changelog xmlns=\"http://www.red-bean.com/xmlns/cvs2cl/\">\n\n";
        }

        # Entries are emitted newest-first.
        foreach my $time (sort {$main::b <=> $main::a} (keys %changelog))
        {
            my $authorhash = $changelog{$time};
            while (my ($author,$mesghash) = each %$authorhash)
            {
                if ($XML_Output) {
                    $author = &xml_escape ($author);
                }
                while (my ($msg,$qunklist) = each %$mesghash)
                {
                    my $files = &pretty_file_list ($qunklist);
                    my $logtext = &pretty_msg_text ($msg);
                    my $header_line; my $body; my $wholething;

                    my ($ignore,$min,$hour,$mday,$mon,$year,$wday)
                        = $UTC_Times ? gmtime($time) : localtime($time);

                    if ($Show_Day_Of_Week or $XML_Output) {
                        $wday = ("Sunday", "Monday", "Tuesday", "Wednesday",
                                 "Thursday", "Friday", "Saturday")[$wday];
                        $wday = ($XML_Output) ? "<weekday>${wday}</weekday>\n" : " $wday";
                    }
                    else {
                        $wday = "";
                    }

                    if ($XML_Output) {
                        $header_line =
                            sprintf ("<date>%4u-%02u-%02u</date>\n"
                                     . "${wday}"
                                     . "<time>%02u:%02u</time>\n"
                                     . "<author>%s</author>\n",
                                     $year+1900, $mon+1, $mday, $hour, $min, $author);
                    }
                    else {
                        $header_line =
                            sprintf ("%4u-%02u-%02u${wday} %02u:%02u %s\n\n",
                                     $year+1900, $mon+1, $mday, $hour, $min, $author);
                    }

                    if ($XML_Output) {
                        $body = $files . $logtext;
                    }
                    elsif ($No_Wrap) {
                        # Wrap only the file list; indent the raw message.
                        $files = wrap ("\t", " ", "$files");
                        $logtext =~ s/\n(.*)/\n\t$1/g;
                        unless ($After_Header eq " ") {
                            $logtext =~ s/^(.*)/\t$1/g;
                        }
                        $body = $files . $After_Header . $logtext;
                    }
                    else {
                        $body = $files . $After_Header . $logtext;
                        $body = wrap ("\t", " ", "$body");
                    }

                    $wholething = $header_line . $body;
                    if ($XML_Output) {
                        $wholething = "<entry>\n${wholething}</entry>\n";
                    }

                    # Honor the -R/--regexp entry filter, if any.
                    if ($Case_Insensitive) {
                        unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/oi)) {
                            print LOG_OUT "${wholething}\n";
                        }
                    }
                    else {
                        unless ($Regexp_Gate && ($wholething !~ /$Regexp_Gate/o)) {
                            print LOG_OUT "${wholething}\n";
                        }
                    }
                }
            }
        }

        if ($XML_Output) {
            print LOG_OUT "</changelog>\n";
        }
        close (LOG_OUT);

        # Swap the finished temp file into place, keeping a .bak copy.
        if (! $Output_To_Stdout)
        {
            if (-f $logfile_here) {
                rename ($logfile_here, $logfile_bak);
            }
            rename ($tmpfile, $logfile_here);
        }
    }
}
# Parse the date/time and author out of a "cvs log" line like:
#
#   date: 1999/02/19 23:29:45;  author: apharris;  state: Exp;
#
# Returns (unix-time, author).  Dies if the line is unparseable or the
# year is outside the sanity range.
sub parse_date_and_author ()
{
    my $line = shift;

    # The match regex had been lost from this line; restored here.
    my ($year, $mon, $mday, $hours, $min, $secs, $author) = $line =~
        m#(\d+)/(\d+)/(\d+)\s+(\d+):(\d+):(\d+);\s+author:\s+([^;]+);#
            or die "Couldn't parse date ``$line''";

    die "Bad date or Y2K issues" unless ($year > 1969 and $year < 2258);

    # CVS log timestamps are UTC, hence timegm rather than timelocal.
    my $time = timegm($secs,$min,$hours,$mday,$mon-1,$year-1900);

    return ($time, $author);
}
# Format the list of file-records ("qunks") that share one log message.
# Returns ChangeLog-style text normally, or a run of <file> elements
# (plus <utag>/<commondir>) under --xml.  Fixes applied: restore the
# <name> interpolation, guard undef tag/branchroot refs, avoid the
# deprecated-and-now-fatal "defined @array", capture the full final
# revision component, and drop a redundant ternary that defeated the
# ".\" (Windows) common-dir check.
sub pretty_file_list ()
{
    if ($Hide_Filenames and (! $XML_Output)) {
        return "";
    }

    my $qunksref = shift;
    my @qunkrefs = @$qunksref;
    my $beauty = ""; my %non_unanimous_tags; my %unanimous_tags; my %all_branches; my $common_dir; my $fbegun = 0;

    # First pass: find the common directory prefix and gather all tags
    # and branches mentioned by any of the files.
    foreach my $qunkref (@qunkrefs)
    {
        if ((scalar (@qunkrefs)) > 1)
        {
            if (! (defined ($common_dir))) {
                my ($base, $dir);
                ($base, $dir, undef) = fileparse ($$qunkref{'filename'});
                # Treat both Unix and Windows "current dir" as no prefix.
                if (($dir eq "./") || ($dir eq ".\\")) {
                    $common_dir = "";
                }
                else {
                    $common_dir = $dir;
                }
            }
            elsif ($common_dir) {
                $common_dir = &common_path_prefix ($$qunkref{'filename'}, $common_dir);
            }
        }
        else {
            $common_dir = "";
        }

        if (defined ($$qunkref{'branch'})) {
            $all_branches{$$qunkref{'branch'}} = 1;
        }
        if (defined ($$qunkref{'tags'})) {
            foreach my $tag (@{$$qunkref{'tags'}}) {
                $non_unanimous_tags{$tag} = 1;
            }
        }
    }

    # Promote tags held by every file to "unanimous" status.
    if ((scalar (@qunkrefs)) > 1) {
        foreach my $tag (keys (%non_unanimous_tags)) {
            my $everyone_has_this_tag = 1;
            foreach my $qunkref (@qunkrefs) {
                if ((! (defined ($$qunkref{'tags'})))
                    or (! (grep ($_ eq $tag, @{$$qunkref{'tags'}})))) {
                    $everyone_has_this_tag = 0;
                }
            }
            if ($everyone_has_this_tag) {
                $unanimous_tags{$tag} = 1;
                delete $non_unanimous_tags{$tag};
            }
        }
    }

    if ($XML_Output)
    {
        foreach my $qunkref (@qunkrefs)
        {
            my $filename = $$qunkref{'filename'};
            my $revision = $$qunkref{'revision'};
            my $tags = $$qunkref{'tags'};
            my $branch = $$qunkref{'branch'};
            my $branchroots = $$qunkref{'branchroots'};

            $filename = &xml_escape ($filename); $revision = &xml_escape ($revision);

            $beauty .= "<file>\n";
            $beauty .= "<name>${filename}</name>\n";
            $beauty .= "<revision>${revision}</revision>\n";
            if ($branch) {
                $branch = &xml_escape ($branch); $beauty .= "<branch>${branch}</branch>\n";
            }
            # Guard: tags/branchroots may be absent; dereferencing undef
            # would die under strict refs.
            if ($tags) {
                foreach my $tag (@$tags) {
                    $tag = &xml_escape ($tag); $beauty .= "<tag>${tag}</tag>\n";
                }
            }
            if ($branchroots) {
                foreach my $root (@$branchroots) {
                    $root = &xml_escape ($root); $beauty .= "<branchroot>${root}</branchroot>\n";
                }
            }
            $beauty .= "</file>\n";
        }
        if ((scalar (keys (%unanimous_tags))) > 1) {
            foreach my $utag ((keys (%unanimous_tags))) {
                $utag = &xml_escape ($utag); $beauty .= "<utag>${utag}</utag>\n";
            }
        }
        if ($common_dir) {
            $common_dir = &xml_escape ($common_dir);
            $beauty .= "<commondir>${common_dir}</commondir>\n";
        }
        return $beauty;
    }

    # Plain-text output from here on.
    if ($common_dir) {
        $beauty .= "$common_dir: ";
    }

    if ($Show_Branches)
    {
        # Group files by branch, each branch's files followed by
        # "(BRANCH.REV)" or "(BRANCH.[R1,R2,...])".
        my @brevisions;
        foreach my $branch (keys (%all_branches))
        {
            foreach my $qunkref (@qunkrefs)
            {
                if ((defined ($$qunkref{'branch'}))
                    and ($$qunkref{'branch'} eq $branch))
                {
                    if ($fbegun) {
                        $beauty .= ", ";
                    }
                    else {
                        $fbegun = 1;
                    }
                    my $fname = substr ($$qunkref{'filename'}, length ($common_dir));
                    $beauty .= $fname;
                    $$qunkref{'printed'} = 1;

                    if ($Show_Tags && (defined $$qunkref{'tags'})) {
                        my @tags = grep ($non_unanimous_tags{$_}, @{$$qunkref{'tags'}});
                        if (@tags) {
                            $beauty .= " (tags: ";
                            $beauty .= join (', ', @tags);
                            $beauty .= ")";
                        }
                    }
                    if ($Show_Revisions) {
                        # Capture the whole final revision component
                        # (was ([\d])+ which kept only the last digit).
                        $$qunkref{'revision'} =~ /.+\.(\d+)$/;
                        push (@brevisions, $1);
                    }
                }
            }
            $beauty .= " ($branch";
            if (@brevisions) {
                if ((scalar (@brevisions)) > 1) {
                    $beauty .= ".[";
                    $beauty .= (join (',', @brevisions));
                    $beauty .= "]";
                }
                else {
                    $beauty .= ".$brevisions[0]";
                }
            }
            $beauty .= ")";
        }
    }

    # Any files not printed above (e.g. files with no branch).
    foreach my $qunkref (@qunkrefs)
    {
        next if (defined ($$qunkref{'printed'}));
        if ($fbegun) {
            $beauty .= ", ";
        }
        else {
            $fbegun = 1;
        }
        $beauty .= substr ($$qunkref{'filename'}, length ($common_dir));
        $$qunkref{'printed'} = 1;

        if ($Show_Revisions || $Show_Tags)
        {
            my $started_addendum = 0;
            if ($Show_Revisions) {
                $started_addendum = 1;
                $beauty .= " (";
                $beauty .= "$$qunkref{'revision'}";
            }
            if ($Show_Tags && (defined $$qunkref{'tags'})) {
                my @tags = grep ($non_unanimous_tags{$_}, @{$$qunkref{'tags'}});
                if ((scalar (@tags)) > 0) {
                    if ($started_addendum) {
                        $beauty .= ", ";
                    }
                    else {
                        $beauty .= " (tags: ";
                    }
                    $beauty .= join (', ', @tags);
                    $started_addendum = 1;
                }
            }
            if ($started_addendum) {
                $beauty .= ")";
            }
        }
    }

    if ($Show_Tags && %unanimous_tags)
    {
        $beauty .= " (utags: ";
        $beauty .= join (', ', keys (%unanimous_tags));
        $beauty .= ")";
    }

    # Emit as a ChangeLog bullet entry.
    $beauty = "* $beauty:";

    return $beauty;
}
# Return the longest common leading directory of PATH1 and PATH2
# (e.g. "a/b/c/f1" and "a/b/d/f2" => "a/b/").  Restores the tr###
# statements that were lost (their '#' delimiters were eaten).
sub common_path_prefix ()
{
    my $path1 = shift;
    my $path2 = shift;

    my ($dir1, $dir2);
    (undef, $dir1, undef) = fileparse ($path1);
    (undef, $dir2, undef) = fileparse ($path2);

    # Transmogrify Windows backslashes into forward slashes so the
    # component walk below only has to deal with "/".
    $dir1 =~ tr#\\#/#;
    $dir2 =~ tr#\\#/#;

    my $accum1 = "";
    my $accum2 = "";
    my $last_common_prefix = "";

    # Extend each accumulator one path component at a time; the last
    # point at which they still agreed is the common prefix.
    while ($accum1 eq $accum2)
    {
        $last_common_prefix = $accum1;
        last if ($accum1 eq $dir1);
        my ($tmp1) = split (/\//, (substr ($dir1, length ($accum1))));
        my ($tmp2) = split (/\//, (substr ($dir2, length ($accum2))));
        $accum1 .= "$tmp1/" if ((defined ($tmp1)) and $tmp1);
        $accum2 .= "$tmp2/" if ((defined ($tmp2)) and $tmp2);
    }

    return $last_common_prefix;
}
# Normalize one log message for output: fix line endings, collapse
# whitespace-only lines, then either wrap it in a <msg> element (--xml)
# or re-fill its paragraphs (unless --no-wrap).
sub pretty_msg_text ()
{
    my $text = shift;

    # Convert CRLF to LF and strip stray whitespace from blank lines.
    $text =~ s/\r\n/\n/g;
    $text =~ s/\n\s*\n/\n\n/g;

    if ($XML_Output) {
        return "<msg>" . &xml_escape ($text) . "</msg>\n";
    }

    unless ($No_Wrap) {
        # Join continuation lines so paragraphs re-fill as single lines;
        # lines starting with ">" or whitespace are left alone.
        1 while ($text =~ s/(^|\n)([^>\s].*[^.\n])\n([^>\n])/$1$2 $3/g);
        1 while ($text =~ s/(^|\n)([^>\s].*)\n([^>\n])/$1$2 $3/g);
    }

    return $text;
}
# Escape the three XML metacharacters in TXT and return the result.
# (The entity references had been collapsed into identity substitutions
# by a faulty transformation; restored here.)
sub xml_escape ()
{
    my $txt = shift;
    # Ampersand must be escaped first, or it would re-escape the
    # entities introduced by the following substitutions.
    $txt =~ s/&/&amp;/g;
    $txt =~ s/</&lt;/g;
    $txt =~ s/>/&gt;/g;
    return $txt;
}
# Read the -U/--usermap file (if one was given) and return a hash of
# username => expansion.  Lines look like "jrandom:J. Random <j@x.com>".
# Restores the comment-skipping line whose "#" had been eaten.
sub maybe_read_user_map_file ()
{
    my %expansions;

    if ($User_Map_File)
    {
        open (MAPFILE, "<$User_Map_File")
            or die ("Unable to open $User_Map_File ($!)");
        while (<MAPFILE>)
        {
            next if /^\s*#/;   # skip comment lines
            next if not /:/;   # and lines with no username:expansion pair
            my ($username, $expansion) = split ':';
            chomp $expansion;
            # Strip one level of surrounding quotes, if present.
            $expansion =~ s/^'(.*)'$/$1/;
            $expansion =~ s/^"(.*)"$/$1/;

            # If the expansion looks like an email address, produce
            # "username <addr>" (adding angle brackets when missing).
            if ($expansion =~ /^\s*<{0,1}\S+@.*/) {
                if (! ($expansion =~ /<\S+@\S+>/)) {
                    $expansions{$username} = "$username <$expansion>";
                }
                else {
                    $expansions{$username} = "$username $expansion";
                }
            }
            else {
                $expansions{$username} = $expansion;
            }
        }
        close (MAPFILE);
    }

    return %expansions;
}
# Parse @ARGV into the file-scoped option globals.  Anything not
# recognized as an option is appended to the "cvs log" command line
# (e.g. file names restricting the log).  Fixes: "-?" was written as
# the regex /^-?$/ (matching "" and "-" instead of a literal "-?"),
# and an unreachable duplicate --version branch has been removed.
sub parse_options ()
{
    my $output_file;
    my $exit_with_admonishment = 0;

    while (my $arg = shift (@ARGV))
    {
        if ($arg =~ /^-h$|^-help$|^--help$|^--usage$|^-\?$/) {
            $Print_Usage = 1;
        }
        elsif ($arg =~ /^--debug$/) {
            $Debug = 1;
        }
        elsif ($arg =~ /^--version$/) {
            $Print_Version = 1;
        }
        elsif ($arg =~ /^-g$|^--global-opts$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            # Insert the options between "cvs" and "log".
            $Log_Source_Command =~ s/(^\S*)/$1 $narg/;
        }
        elsif ($arg =~ /^-l$|^--log-opts$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            $Log_Source_Command .= " $narg";
        }
        elsif ($arg =~ /^-B$|^--branch$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            $Only_Branch = $narg;
        }
        elsif ($arg =~ /^-f$|^--file$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            $output_file = $narg;
        }
        elsif ($arg =~ /^-U$|^--usermap$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            $User_Map_File = $narg;
        }
        elsif ($arg =~ /^-W$|^--window$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            $Max_Checkin_Duration = $narg;
        }
        elsif ($arg =~ /^-I$|^--ignore$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            push (@Ignore_Files, $narg);
        }
        elsif ($arg =~ /^-C$|^--case-insensitive$/) {
            $Case_Insensitive = 1;
        }
        elsif ($arg =~ /^-R$|^--regexp$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            $Regexp_Gate = $narg;
        }
        elsif ($arg =~ /^--stdout$/) {
            $Output_To_Stdout = 1;
        }
        elsif ($arg =~ /^-d$|^--distributed$/) {
            $Distributed = 1;
        }
        elsif ($arg =~ /^-P$|^--prune$/) {
            $Prune_Empty_Msgs = 1;
        }
        elsif ($arg =~ /^-S$|^--separate-header$/) {
            $After_Header = "\n\n";
        }
        elsif ($arg =~ /^--no-wrap$/) {
            $No_Wrap = 1;
        }
        elsif ($arg =~ /^--gmt$|^--utc$/) {
            $UTC_Times = 1;
        }
        elsif ($arg =~ /^-w$|^--day-of-week$/) {
            $Show_Day_Of_Week = 1;
        }
        elsif ($arg =~ /^-r$|^--revisions$/) {
            $Show_Revisions = 1;
        }
        elsif ($arg =~ /^-t$|^--tags$/) {
            $Show_Tags = 1;
        }
        elsif ($arg =~ /^-b$|^--branches$/) {
            $Show_Branches = 1;
        }
        elsif ($arg =~ /^-F$|^--follow$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            push (@Follow_Branches, $narg);
        }
        elsif ($arg =~ /^--stdin$/) {
            $Input_From_Stdin = 1;
        }
        elsif ($arg =~ /^--header$/) {
            my $narg = shift (@ARGV) || die "$arg needs argument.\n";
            $ChangeLog_Header = &slurp_file ($narg);
            if (! defined ($ChangeLog_Header)) {
                $ChangeLog_Header = "";
            }
        }
        elsif ($arg =~ /^--xml$/) {
            $XML_Output = 1;
        }
        elsif ($arg =~ /^--hide-filenames$/) {
            $Hide_Filenames = 1;
            $After_Header = "";
        }
        else {
            # Not an option: pass through to the log command.
            $Log_Source_Command .= " $arg";
        }
    }

    # Check for mutually exclusive options.
    if ($Output_To_Stdout && $Distributed) {
        print STDERR "cannot pass both --stdout and --distributed\n";
        $exit_with_admonishment = 1;
    }
    if ($Output_To_Stdout && $output_file) {
        print STDERR "cannot pass both --stdout and --file\n";
        $exit_with_admonishment = 1;
    }

    if ($exit_with_admonishment) {
        &usage ();
        exit (1);
    }
    elsif ($Print_Usage) {
        &usage ();
        exit (0);
    }
    elsif ($Print_Version) {
        &version ();
        exit (0);
    }

    if ($Output_To_Stdout) {
        undef $Log_File_Name;
    }
    elsif ($output_file) {
        $Log_File_Name = $output_file;
    }
}
# Read the entire contents of FILENAME and return it as one string
# (undef for an empty file).  A filename of "-" reads standard input,
# which "--header -" relies on.  Restores the corrupted filename
# interpolation in the open() call.
sub slurp_file ()
{
    my $filename = shift || die ("no filename passed to slurp_file()");
    my $retstr;

    open (SLURPEE, "<$filename") or die ("unable to open $filename ($!)");
    # Slurp mode; local() restores $/ automatically on scope exit.
    local $/ = undef;
    $retstr = <SLURPEE>;
    close (SLURPEE);
    return $retstr;
}
# Emit MSG to STDERR, but only when --debug was given.
sub debug ()
{
    my $msg = shift;
    print STDERR $msg if $Debug;
}
# Print the program name and version (taken from the RCS Revision
# string at the top of the file).
sub version ()
{
    printf "cvs2cl.pl version %s; distributed under the GNU GPL.\n", $VERSION;
}
# Print the version line followed by the full usage text.
# Fix: "no would ever" -> "no one would ever" in the help text.
sub usage ()
{
    &version ();
    print <<'END_OF_INFO';
Generate GNU-style ChangeLogs in CVS working copies.
Notes about the output format(s):
The default output of cvs2cl.pl is designed to be compact, formally
unambiguous, but still easy for humans to read. It is largely
self-explanatory, I hope; the one abbreviation that might not be
obvious is "utags". That stands for "universal tags" -- a
universal tag is one held by all the files in a given change entry.
If you need output that's easy for a program to parse, use the
--xml option. Note that with XML output, just about all available
information is included with each change entry, whether you asked
for it or not, on the theory that your parser can ignore anything
it's not looking for.
Notes about the options and arguments (the actual options are listed
last in this usage message):
* The -I and -F options may appear multiple times.
* To follow trunk revisions, use "-F trunk" ("-F TRUNK" also works).
This is okay because no one would ever, ever be crazy enough to name a
branch "trunk", right? Right.
* For the -U option, the UFILE should be formatted like
CVSROOT/users. That is, each line of UFILE looks like this
jrandom:jrandom@red-bean.com
or maybe even like this
jrandom:'Jesse Q. Random <jrandom@red-bean.com>'
Don't forget to quote the portion after the colon if necessary.
* Many people want to filter by date. To do so, invoke cvs2cl.pl
like this:
cvs2cl.pl -l "-d'DATESPEC'"
where DATESPEC is any date specification valid for "cvs log -d".
(Note that CVS 1.10.7 and below requires there be no space between
-d and its argument).
Options/Arguments:
-h, -help, --help, or -? Show this usage and exit
--version Show version and exit
-r, --revisions Show revision numbers in output
-b, --branches Show branch names in revisions when possible
-t, --tags Show tags (symbolic names) in output
--stdin Read from stdin, don't run cvs log
--stdout Output to stdout not to ChangeLog
-d, --distributed Put ChangeLogs in subdirs
-f FILE, --file FILE Write to FILE instead of "ChangeLog"
-W SECS, --window SECS Window of time within which log entries unify
-U UFILE, --usermap UFILE Expand usernames to email addresses from UFILE
-R REGEXP, --regexp REGEXP Include only entries that match REGEXP
-I REGEXP, --ignore REGEXP Ignore files whose names match REGEXP
-C, --case-insensitive Any regexp matching is done case-insensitively
-F BRANCH, --follow BRANCH Show only revisions on or ancestral to BRANCH
-B BRANCH, --branch BRANCH Show only revisions directly on branch BRANCH
-S, --separate-header Blank line between each header and log message
--no-wrap Don't auto-wrap log message (recommend -S also)
--gmt, --utc Show times in GMT/UTC instead of local time
-w, --day-of-week Show day of week
--header FILE Get ChangeLog header from FILE ("-" means stdin)
--xml Output XML instead of ChangeLog format
--hide-filenames Don't show filenames (ignored for XML output)
-P, --prune Don't show empty log messages
-g OPTS, --global-opts OPTS Invoke like this "cvs OPTS log ..."
-l OPTS, --log-opts OPTS Invoke like this "cvs ... log OPTS"
FILE1 [FILE2 ...] Show only log information for the named FILE(s)
See http://www.red-bean.com/~kfogel/cvs2cl.shtml for maintenance and bug info.
END_OF_INFO
}
__END__
=head1 NAME
cvs2cl.pl - produces GNU-style ChangeLogs in CVS working copies, by
running "cvs log" and parsing the output. Shared log entries are
unified in an intuitive way.
=head1 DESCRIPTION
This script generates GNU-style ChangeLog files from CVS log
information. Basic usage: just run it inside a working copy and a
ChangeLog will appear. It requires repository access (i.e., 'cvs log'
must work). Run "cvs2cl.pl --help" to see more advanced options.
See http://www.red-bean.com/~kfogel/cvs2cl.shtml for updates, and
for instructions on getting anonymous CVS access to this script.
Maintainer: Karl Fogel <kfogel@red-bean.com>
Please report bugs to <cvs2cl-bugs@red-bean.com>.
=head1 README
This script generates GNU-style ChangeLog files from CVS log
information. Basic usage: just run it inside a working copy and a
ChangeLog will appear. It requires repository access (i.e., 'cvs log'
must work). Run "cvs2cl.pl --help" to see more advanced options.
See http://www.red-bean.com/~kfogel/cvs2cl.shtml for updates, and
for instructions on getting anonymous CVS access to this script.
Maintainer: Karl Fogel <kfogel@red-bean.com>
Please report bugs to <cvs2cl-bugs@red-bean.com>.
=head1 PREREQUISITES
This script requires C<Text::Wrap>, C<Time::Local>, and
C<File::Basename>.
It also seems to require C<Perl 5.004_04> or higher.
=pod OSNAMES
any
=pod SCRIPT CATEGORIES
Version_Control/CVS
=cut
-*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*-
Note about a bug-slash-opportunity:
-----------------------------------
There's a bug in Text::Wrap, which affects cvs2cl. This script
reveals it:
use Text::Wrap;
my $test_text =
"This script demonstrates a bug in Text::Wrap. The very long line
following this paragraph will be relocated relative to the surrounding
text:
====================================================================
See? When the bug happens, we'll get the line of equal signs below
this paragraph, even though it should be above.";
print "$test_text";
print "\n";
print "\n";
print wrap ("\t", " ", "$test_text");
print "\n";
print "\n";
If the line of equal signs were one character shorter, the bug
wouldn't happen. Interesting.
Anyway, rather than fix this in Text::Wrap, we might as well write a
new wrap() which has the following much-needed features:
* initial indentation, like current Text::Wrap()
* subsequent line indentation, like current Text::Wrap()
* user chooses among: force-break long words, leave them alone, or die()?
* preserve existing indentation: chopped chunks from an indented line
are indented by same (like this line, not counting the asterisk!)
* optional list of things to preserve on line starts, default ">"
Note that the last two are essentially the same concept, so unify in
implementation and give a good interface to controlling them.
And how about:
Optionally, when encountering a line pre-indented the same as the
previous line, strip the newline and refill, but keep that same indentation.
Yeah...