I have this segment of code:
struct timeval start, end;
gettimeofday(&start, NULL);
//code I'm timing
gettimeofday(&end, NULL);
long elapsed = ((end.tv_sec-start.tv_sec)*1000000 + end.tv_usec-start.tv_usec);
ofstream timeFile;
timeFile.open ("timingSheet.txt");
timeFile << fixed << showpoint;
timeFile << setprecision(2);
timeFile << "Duration: " << elapsed << "\n";
timeFile.close();
This outputs the number of microseconds that have passed. However, if I change this line
long elapsed = ((end.tv_sec-start.tv_sec)*1000000 + end.tv_usec-start.tv_usec);
to this:
long elapsed = ((end.tv_sec-start.tv_sec)*1000000 + end.tv_usec-start.tv_usec)/1000000.0;
I get a negative value. Why does this happen?
You are dividing by a double (1000000.0) and then casting the result back into an integer type.
Presuming all your start and end fields are ints (or longs), there is an awkward cast into a double and then back into a long, which throws away the fractional part.
Try:
double elapsed = (double)(end.tv_sec-start.tv_sec) + (double)(end.tv_usec-start.tv_usec)/1000000.0;
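Putting it together with the ofstream output from your question, a minimal sketch (your variable names, with elapsed kept as a double so the setprecision call actually has a fraction to format):
#include <sys/time.h>
#include <fstream>
#include <iomanip>
using namespace std;

int main()
{
    struct timeval start, end;
    gettimeofday(&start, NULL);
    // code being timed
    gettimeofday(&end, NULL);

    // Keep the result as a double so the fractional seconds survive.
    double elapsed = (double)(end.tv_sec - start.tv_sec)
                   + (double)(end.tv_usec - start.tv_usec) / 1000000.0;

    ofstream timeFile;
    timeFile.open("timingSheet.txt");
    timeFile << fixed << showpoint << setprecision(2);
    timeFile << "Duration: " << elapsed << " seconds\n";
    timeFile.close();
    return 0;
}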
I use a timing class that I borrowed from somewhere here on SO.
#include <time.h>
#include <sys/time.h>
#include <iomanip>
#include <iostream>

using namespace std;

class Timer
{
private:
    timeval startTime;

public:
    // Record the start time.
    void start()
    {
        gettimeofday(&startTime, NULL);
    }

    // Return the elapsed time since start() in seconds.
    double stop()
    {
        timeval endTime;
        long seconds, useconds;
        double duration;

        gettimeofday(&endTime, NULL);

        seconds  = endTime.tv_sec  - startTime.tv_sec;
        useconds = endTime.tv_usec - startTime.tv_usec;

        duration = seconds + useconds / 1000000.0;
        return duration;
    }

    static void printTime(double duration)
    {
        cout << setprecision(6) << fixed << duration << " seconds" << endl;
    }
};
For example:
Timer timer = Timer();
timer.start();

long x = 0;
for (int i = 0; i < 256; i++)
    for (int j = 0; j < 256; j++)
        for (int k = 0; k < 256; k++)
            for (int l = 0; l < 256; l++)
                x++;

timer.printTime(timer.stop());
yields 11.346621 seconds.
For my hash function project, I get:
Number of collisions: 0
Set size: 16777216
VM: 841.797MB
22.5810500000 seconds
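If you want the duration written to a file like the timingSheet.txt in your question instead of to cout, a minimal sketch (assuming the Timer class above, plus #include <fstream>):
Timer timer;
timer.start();
// code being timed
double duration = timer.stop();

ofstream timeFile("timingSheet.txt");
timeFile << fixed << setprecision(6);
timeFile << "Duration: " << duration << " seconds\n";
timeFile.close();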