The big offender is the function I wrote that does the Mars time conversions. My code compiles to 29,872 bytes. Commenting out the function reduces it to 16,020 bytes. So this one function is taking up more than 13k! Here's the code:

Code:
// BCD digits of the current UTC wall-clock time (maintained elsewhere).
uint8_t MinTens = 0;
uint8_t MinOnes = 0;
uint8_t SecTens = 0;
uint8_t SecOnes = 0;
uint8_t UTCHourTens = 1;
uint8_t UTCHourOnes = 2;
uint8_t UTCDateOnes = 1;   // day of month, 1-based
uint8_t UTCDateTens = 0;
uint8_t UTCMonthCode = 1;  // 1 = January ... 12 = December
uint8_t UTCYearsOnes = 2;  // years since 2000, split into BCD digits
uint8_t UTCYearsTens = 1;

// Outputs: Mars sol number and BCD digits of Mars local time.
uint32_t MarsSol;
uint8_t MarsHourTens;
uint8_t MarsHourOnes;
uint8_t MarsMinTens;
uint8_t MarsMinOnes;

// Seconds in each month of a non-leap year; index 0 = January.
// A table lookup replaces the previous chain of twelve if-statements
// (smaller code, one loop instead of twelve compares).
static const uint32_t MonthSeconds[12] = {
  2678400L, 2419200L, 2678400L, 2592000L, 2678400L, 2592000L,
  2678400L, 2678400L, 2592000L, 2678400L, 2592000L, 2678400L
};

/*
 * Convert the current UTC date/time (global BCD digits) to Mars time.
 *
 * Epoch: mission epoch offset in micro-sols, subtracted from the Mars
 *        Sol Date; pass 0 for plain Coordinated Mars Time (MTC).
 *
 * Results are written to MarsSol, MarsHourTens/Ones, MarsMinTens/Ones.
 *
 * Fixes vs. the previous revision:
 *  - 2000 IS a leap year (divisible by 400), so it is now included in
 *    both the completed-years leap-day count and the current-year
 *    Feb 29 correction.
 *  - Day of month is 1-based, so only (day - 1) whole days have
 *    elapsed; the old code was one day late.
 * Valid for dates 2000-2099 (2100 is not a leap year).
 */
void ComputeMarsTime(uint64_t Epoch)
{
  uint32_t UnixTime = 946684800L; // Unix time of 2000-01-01 00:00:00 UTC
  uint8_t temp;

  // Whole years elapsed since 2000 (365-day years first).
  temp = UTCYearsTens * 10 + UTCYearsOnes;
  UnixTime += (uint32_t) temp * 31536000L;

  // Leap days in the completed years. In 2000-2099 every year divisible
  // by 4 is a leap year, INCLUDING 2000 (divisible by 400). For year
  // 2000+y the count of Feb 29ths in [2000, 2000+y) is (y-1)/4 + 1 for
  // y >= 1, and 0 for y == 0. The current year's Feb 29 is added below.
  if ( temp )
    UnixTime += (uint32_t) (((temp - 1) / 4) + 1) * 86400L;

  // Seconds for the completed months of the current year.
  for ( uint8_t m = 1; m < UTCMonthCode && m <= 12; m++ )
    UnixTime += MonthSeconds[m - 1];

  // Current-year Feb 29: counts once we are past February.
  // temp still holds years-since-2000 here; year 2000 (temp == 0)
  // is correctly included.
  if ( UTCMonthCode >= 3 && temp % 4 == 0 )
    UnixTime += 86400L;

  // Day of month is 1-based: (day - 1) full days have elapsed.
  temp = UTCDateTens * 10 + UTCDateOnes;
  if ( temp )
    temp--;
  UnixTime += (uint32_t) temp * 86400L;

  temp = UTCHourTens * 10 + UTCHourOnes;
  UnixTime += (uint32_t) temp * 3600L;

  temp = MinTens * 10 + MinOnes;
  UnixTime += (uint32_t) temp * 60L;

  temp = SecTens * 10 + SecOnes;
  UnixTime += (uint32_t) temp;

  // Julian Date scaled by 1e6 (micro-days). 2440587.5 is the JD of the
  // Unix epoch.
  uint64_t JD_UTC = (UnixTime * 1000000LL) / 86400LL + 2440587500000LL;

  // Terrestrial Time: TT = UTC + leap seconds + 32.184 s.
  // TODO: make the leap-second count (35) configurable.
  uint64_t JD_TT = JD_UTC + (35LL * 1000000LL + 32184000LL) / 86400LL;

  // Mars Sol Date in micro-sols:
  //   MSD = (JD_TT - 2451549.5) / 1.02749125 + 44796.0 - 0.00096
  uint64_t J2000 = JD_TT - 2451545000000LL;
  uint64_t MSD = J2000 - 4500000LL;
  MSD *= 1000000000LL;  // ~5e18 for current dates, still fits in 64 bits
  MSD /= 1027491252LL;  // Earth-day / Mars-sol length ratio * 1e9
  MSD += 44796000000LL - 960LL;
  MSD -= Epoch;         // mission epoch in micro-sols; 0 for MTC

  MarsSol = MSD / 1000000LL;

  // Fractional sol -> Mars hours and minutes (BCD digits).
  uint32_t MTC = (MSD % 1000000L) * 24L;
  uint8_t MTCHour = MTC / 1000000L;
  MarsHourOnes = MTCHour % 10;
  MarsHourTens = MTCHour / 10;

  MTC = (MTC % 1000000L) * 60L;
  uint8_t MTCMin = MTC / 1000000L; // was (MTC - MTC % 1e6)/1e6 — same value
  MarsMinOnes = MTCMin % 10;
  MarsMinTens = MTCMin / 10;
}

Now I can clearly reduce the number of math operations. But given that this is taking up >13k, I'd like to get a conceptual idea of where the big offender(s) are before I dive in, so I can optimize where it counts. My fear is that the bulk of this is assembly language subroutines for the 64 bit math operations I'm using. In that case, each addition, subtraction, modulo, etc. is only taking up a handful of bytes, and reducing the number of operations won't buy me much. I don't really need to squeeze every last byte out of it; I'm just looking for low hanging fruit that could free up a little breathing room.

Any advice would be appreciated. Thanks!