#!/usr/bin/perl
use strict;
use warnings;

use File::Basename;
use File::Spec::Functions qw(rel2abs);
use List::Util qw(sum);

### Script configuration ----------------------------------------------------

my $queries  = 5;    # wget attempts per site per run
my $max_wait = 5;    # upper bound (seconds) on the random pause between attempts

# Sites probed on every run; column order matches the CSV header in check_file.
my @site_list = (
    "google.com",    # External, Internet
    "msn.com",       # External, Internet
    "yahoo.com",     # External, Internet
    "amazon.com",
);

# Absolute directory this script lives in; daily log files are written here.
my $dirname = dirname( rel2abs($0) );

# check_file($path)
#
# Ensure the daily log file exists and starts with the CSV header row.
# Takes the absolute path of the log file; returns nothing; dies on I/O error.
sub check_file {
    my ($filename) = @_;

    # Single append-mode open: '>>' creates the file when missing without
    # truncating an existing one (the original opened the file twice and
    # leaked the first handle).
    open( my $fh, '>>', $filename )
        or die "Could not open file '$filename' $!";

    # Only a freshly created (empty) file gets the header.
    # FIX: original omitted the trailing newline, fusing the header with
    # the first record's "!!" marker.
    if ( !-s $filename ) {
        print {$fh} "Time In, Google, Msn, Yahoo, Amazon, Time Out\n";
    }

    close $fh
        or die "Could not close file '$filename' $!";
    return;
}

### Begin script execution --------------------------------------------------

# main()
#
# Probe each configured site $queries times with wget, append one record to
# the per-day log file (start time, per-site average latency in ms, end time),
# and print a one-line summary to stdout.
sub main {
    # One log file per day; chomp the date so it is filename friendly.
    my $entry_date = `date +%m%d%y`;
    chomp $entry_date;

    my $filename = "$dirname/test_${entry_date}.mkd";
    check_file($filename);

    open( my $fh, '>>', $filename )
        or die "Could not open file '$filename' $!";

    # Capture the start time before running the probe loop.
    my $time = `date +%H:%M:%S`;
    chomp $time;
    print {$fh} "!!\n" . $time . ", ";

    # Probe every site and log the average round-trip time per site.
    foreach my $site (@site_list) {
        my @attempts;

        for ( 1 .. $queries ) {
            # %E from /usr/bin/time looks like "0:00.35"; substr strips the
            # leading "0:" and numification * 1000 yields milliseconds.
            # NOTE(review): 2>&1 also captures wget's own error text, so a
            # failed fetch numifies to 0 — treated below as "no data".
            my $attempt = substr(
                `/usr/bin/time -f %E wget -qO/dev/null https://$site 2>&1`,
                2 ) * 1000;
            push @attempts, $attempt;

            # Random pause so the probes are not back-to-back.
            sleep int rand($max_wait);
        }

        # FIX: original divided by $#attempts (count - 1), which skewed the
        # average upward and would divide by zero for a single query.
        my $average = @attempts ? sum(@attempts) / scalar(@attempts) : 0;

        # FIX: numeric comparison (was string 'ne', which misjudges values
        # like "0.0"). Zero means every attempt failed, so leave a visible
        # gap in the record instead of a bogus latency.
        if ( $average != 0 ) {
            print {$fh} $average . ', ';
        }
        else {
            print {$fh} ' , 0 , ';
        }
    }

    # Closing timestamp for the record.
    $time = `date +%H:%M:%S`;
    chomp $time;
    print {$fh} $time;

    # Checked close: buffered write errors only surface here.
    close $fh
        or die "Could not close file '$filename' $!";
    print "site check ran: ${entry_date} ${time}\n";
    return;
}

main();
exit 0;