#define DIR_NAME_SIZE 256
#define __UNUSED__ __attribute__((__unused__))
+
+/* obtain the time of an event */
+
+static inline LttTime getEventTime(LttTracefile * tf);
+
+
/* set the offset of the fields belonging to the event,
need the information of the archecture */
void setFieldsOffset(LttTracefile *tf,LttEventType *evT,void *evD,LttTrace *t);
evT = ltt_trace_eventtype_get(t->trace,(unsigned)evId);
- if(evT) rootFld = evT->root_field;
+ if(likely(evT)) rootFld = evT->root_field;
else return ERANGE;
- if(rootFld){
+ if(likely(rootFld)){
//event has string/sequence or the last event is not the same event
- if((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
- && rootFld->field_fixed == 0){
+ if(likely((evT->latest_block!=t->which_block || evT->latest_event!=t->which_event)
+ && rootFld->field_fixed == 0)){
setFieldsOffset(t, evT, evData, t->trace);
}
t->cur_event_pos += EVENT_HEADER_SIZE + rootFld->field_size;
evT->latest_event = t->which_event;
//the next event is in the next block
- if(evId == TRACE_BLOCK_END){
+ if(unlikely(evId == TRACE_BLOCK_END)){
t->cur_event_pos = t->buffer + t->block_size;
}else{
t->which_event++;
void getCyclePerNsec(LttTracefile * t)
{
LttTime lBufTotalTime; /* Total time for this buffer */
- LttCycleCount lBufTotalNSec; /* Total time for this buffer in nsecs */
- LttCycleCount lBufTotalCycle;/* Total cycles for this buffer */
+ double lBufTotalNSec; /* Total time for this buffer in nsecs */
+ double lBufTotalCycle;/* Total cycles for this buffer */
/* Calculate the total time for this buffer */
lBufTotalTime = ltt_time_sub(t->a_block_end->time, t->a_block_start->time);
lBufTotalCycle -= t->a_block_start->cycle_count;
/* Convert the total time to nsecs */
- lBufTotalNSec = lBufTotalTime.tv_sec;
- lBufTotalNSec *= NANOSECONDS_PER_SECOND;
- lBufTotalNSec += lBufTotalTime.tv_nsec;
+ lBufTotalNSec = ltt_time_to_double(lBufTotalTime);
- t->cycle_per_nsec = (double)lBufTotalCycle / (double)lBufTotalNSec;
+ t->nsec_per_cycle = (double)lBufTotalNSec / (double)lBufTotalCycle;
+ /* See : http://www.azillionmonkeys.com/qed/adiv.html */
+ // precalculate the reciprocal, so divisions will be really fast.
+ // 2^32-1 == 0xFFFFFFFFULL
+ //{
+ // double int_res = lBufTotalCycle/lBufTotalNSec;
+ // t->cycles_per_nsec_reciprocal =
+ // ((0xFFFF+int_res)/int_res);
+ //}
+
}
/****************************************************************************
*Function name
* getEventTime : obtain the time of an event
+ * NOTE : this function _really_ is on the critical path.
*Input params
* tf : tracefile
*Return value
* LttTime : the time of the event
****************************************************************************/
-LttTime getEventTime(LttTracefile * tf)
+static inline LttTime getEventTime(LttTracefile * tf)
{
LttTime time;
LttCycleCount cycle_count; // cycle count for the current event
LttCycleCount lEventTotalCycle; // Total cycles from start for event
- LttCycleCount lEventNSec; // Total usecs from start for event
+ LttCycleCount lEventNSec; // Total nsecs from start for event
LttTime lTimeOffset; // Time offset in struct LttTime
guint16 evId;
- LttCycleCount tmpCycleCount = (((LttCycleCount)1)<<32);
evId = *(guint16 *)tf->cur_event_pos;
- if(evId == TRACE_BLOCK_START){
+ if(unlikely(evId == TRACE_BLOCK_START)){
tf->count = 0;
tf->pre_cycle_count = 0;
tf->cur_cycle_count = tf->a_block_start->cycle_count;
return tf->a_block_start->time;
- }else if(evId == TRACE_BLOCK_END){
+ }else if(unlikely(evId == TRACE_BLOCK_END)){
tf->count = 0;
tf->pre_cycle_count = 0;
tf->cur_cycle_count = tf->a_block_end->cycle_count;
// Calculate total time in cycles from start of buffer for this event
cycle_count = (LttCycleCount)*(guint32 *)(tf->cur_event_pos + EVENT_ID_SIZE);
- if(cycle_count < tf->pre_cycle_count)tf->count++;
+ if(unlikely(cycle_count < tf->pre_cycle_count)) tf->count++;
tf->pre_cycle_count = cycle_count;
- cycle_count += tmpCycleCount * tf->count;
+ cycle_count += (LttCycleCount)tf->count << 32;
// if(tf->cur_heart_beat_number > tf->count)
// cycle_count += tmpCycleCount * (tf->cur_heart_beat_number - tf->count);
lEventTotalCycle -= tf->a_block_start->cycle_count;
// Convert it to nsecs
- lEventNSec = (double)lEventTotalCycle / (double)tf->cycle_per_nsec;
-
+ lEventNSec = (double)lEventTotalCycle * (double)tf->nsec_per_cycle;
+ //lEventNSec = (tf->cycles_per_nsec_reciprocal * lEventTotalCycle) >> 16;
+
// Determine offset in struct LttTime
- lTimeOffset.tv_nsec = lEventNSec % NANOSECONDS_PER_SECOND;
- lTimeOffset.tv_sec = lEventNSec / NANOSECONDS_PER_SECOND;
+ lTimeOffset = ltt_time_from_double(lEventNSec);
time = ltt_time_add(tf->a_block_start->time, lTimeOffset);
LttField * rootFld = evT->root_field;
// rootFld->base_address = evD;
- if(rootFld)
+ if(likely(rootFld))
rootFld->field_size = getFieldtypeSize(tf, evT, 0,0,rootFld, evD,t);
}
int size, size1, element_number, i, offset1, offset2;
LttType * type = fld->field_type;
- if(t){
- if(evT->latest_block==t->which_block && evT->latest_event==t->which_event){
+ if(likely(t)){
+ if(unlikely(evT->latest_block==t->which_block && evT->latest_event==t->which_event)){
return fld->field_size;
}
}
- if(fld->field_fixed == 1){
+ if(likely(fld->field_fixed == 1)){
if(fld == evT->root_field) return fld->field_size;
}
case LTT_STRUCT:
element_number = (int) type->element_number;
size = 0;
- if(fld->field_fixed == -1){
+ if(fld->field_fixed == -1){
offset1 = offsetRoot;
offset2 = 0;
for(i=0;i<element_number;i++){