Real Time Operating System: RTLinux – core module (u901631 陳碩璜)
Real Time vs. General Purpose (from 928310 吳中如, 928320 吳琦)
rtl_intercept(regs) (from 928310 吳中如, 928320 吳琦)
Tracing the RTLinux Source Code: rtl_core.c
Interrupt control data structures

Global interrupts:

struct rtl_global {
    spinlock_t hard_irq_controller_lock;
    unsigned long flags;
    unsigned long pending[IRQ_ARRAY_SIZE];       /* which irqs are pended */
    unsigned long soft_enabled[IRQ_ARRAY_SIZE];  /* which irqs Linux has (soft) enabled */
    unsigned long rtirq[IRQ_ARRAY_SIZE];         /* which irqs have real-time handlers */
};

struct rtl_global rtl_global = { SPIN_LOCK_UNLOCKED, 0, IRQ_ZINIT, IRQ_NZINIT, IRQ_ZINIT };

/* RTLinux interrupt handlers */
struct rtl_global_handlers {
    unsigned int (*handler)(unsigned int irq, struct pt_regs *r);
} rtl_global_handlers[IRQ_MAX_COUNT];            /* IRQ_MAX_COUNT == 256 */
Local interrupts:

struct rtl_local {
    __u32 flags;                     /* l_ienable bit: local interrupts enabled */
#ifdef __LOCAL_IRQS__
    __u32 pending;                   /* which irq is pended */
    __u32 rtirq;                     /* which one is the real-time irq */
    rtl_local_handler_t rt_handlers[LOCAL_PND_MAX];
#endif
};

struct rtl_local rtl_local[NR_CPUS];             /* one per CPU */
/* Enable soft (Linux) interrupts; first process anything that was
   pended while they were disabled. */
void rtl_soft_sti(void)
{
    DeclareAndInit(cpu_id);
    if ( L_TEST(l_pend_since_sti) || G_TEST(g_pend_since_sti)
#if LINUX_VERSION_CODE >= 0x020300
         || (softirq_active(cpu_id) & softirq_mask(cpu_id))
#endif
       )
        rtl_process_pending();       /* process the pended interrupts for Linux */
    rtl_soft_sti_no_emulation();
}

void rtl_soft_sti_no_emulation(void)
{
    DeclareAndInit(cpu_id);
    if ( !L_TEST(l_ienable) )
        last_cli[cpu_id] = 0;
    L_SET(l_ienable);
}
void rtl_soft_cli(void)
{
    DeclareAndInit(cpu_id);
    if ( L_TEST(l_ienable) )
        /* __builtin_return_address(0) returns the return address of rtl_soft_cli() */
        last_cli[cpu_id] = (unsigned long)__builtin_return_address(0);
    L_CLEAR(l_ienable);
}

#define L_CLEAR(f) clear_bit(f, &rtl_local[cpu_id].flags)
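The pair rtl_soft_cli()/rtl_soft_sti() is the heart of interrupt emulation: Linux's cli/sti are redirected to them, so the hardware interrupt flag stays on and only a per-CPU software flag changes; real-time interrupts are therefore never masked by Linux. Below is a minimal, self-contained sketch of that idea in userspace C. All names are hypothetical; this shows the concept, not RTLinux's actual code.

#include <stdio.h>

static int soft_enabled = 1;     /* emulated interrupt flag */
static unsigned int pending;     /* bitmask of pended irqs */

void soft_cli(void) { soft_enabled = 0; }

void soft_sti(void)
{
    soft_enabled = 1;
    while (pending) {            /* replay irqs pended while "disabled" */
        int irq = __builtin_ctz(pending);
        pending &= pending - 1;  /* clear the lowest set bit */
        printf("dispatching pended irq %d\n", irq);
    }
}

/* Called on every hardware interrupt. */
void intercept(int irq)
{
    if (soft_enabled)
        printf("dispatching irq %d immediately\n", irq);
    else
        pending |= 1u << irq;    /* pend it for the next soft_sti() */
}

int main(void)
{
    soft_cli();
    intercept(3);                /* pended, not dispatched */
    intercept(5);
    soft_sti();                  /* replays irq 3 and 5 */
    return 0;
}

When Linux "disables" interrupts, hardware interrupts still arrive; rtl_intercept simply pends the ones destined for Linux, and rtl_soft_sti() replays them, as rtl_process_pending() does in the real code traced below.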
#define rtl_hard_cli()   __rtl_hard_cli()
#define __rtl_hard_cli() __asm__ __volatile__("cli" : : : "memory")
#define rtl_hard_sti()   __rtl_hard_sti()
#define __rtl_hard_sti() __asm__ __volatile__("sti" : : : "memory")

cli makes the CPU ignore maskable external interrupts; sti re-enables them.
int init_module(void)
{
    int ret;
    if ( arch_takeover() ) {
        printk("arch_takeover failed\n");
        return -1;
    }
    if ( !quiet )
        printk("RTLinux Extensions Loaded (http://www.fsmlabs.com/)\n");
    ret = rtl_printf_init();
    if (ret < 0)
        return ret;
    rtl_soft_sti();
    rtlinux_suspend_linux_init();
    return 0;
}
inline int arch_takeover(void)
{
    int i;
    DeclareAndInit(cpu_id);
    rtl_hard_cli();
    if ( G_TEST_AND_SET(g_initialized) ) {
        printk("Can't lock to install RTL. Already installed?\n");
        rtl_hard_sti();
        return -1;
    }
    if ( rtl_smp_synchronize(sync_on, &sync_data.waiting_with_cli) )
        return -1;
    rtl_global.flags = (1 << g_initialized);   /* set the g_initialized bit */

    for (i = 0; i < NR_CPUS; i++) {
        rtl_local[i].flags = POS_TO_BIT(l_ienable) | POS_TO_BIT(l_idle);
        rtl_reschedule_handlers[i] = &default_reschedule_handler;
    }
    patch_kernel(cpu_id);
#ifdef CONFIG_SMP
    barrier();                                 /* optimization barrier */
    atomic_inc(&sync_data.done_patch);
    mb();                                      /* memory barrier */
#endif
    rtl_hard_sti();
    rtl_soft_sti();
#ifdef CONFIG_SMP
    rtl_request_global_irq(RTL_RESCHEDULE_VECTOR - 0x20, rtl_reschedule_interrupt);
#endif
    return 0;
}
static int patch_kernel(unsigned int cpu_id)
{
    enum pfunctions i;
    char *p;
    irq_desc_t *h;    /* points at Linux's irq_desc[] (initialization not shown on the slide) */
    const struct func_table *pfunc = (struct func_table *)&__start_rtlinux_funcs;

    /* rtl_intercept */
    xdo_IRQ = (void *)pfunc[pf_do_IRQ].address;
        /* address of Linux's do_IRQ(), which executes all interrupt
           service routines associated with an interrupt */
    local_ret_from_intr = (void *)pfunc[pf_ret_from_intr].address;
        /* address of Linux's ret_from_intr(), which restores the hardware context */
    if ( !(p = find_patch((ulong)pfunc[pf_do_IRQ].address)) ) {
        printk("RTLinux cannot patch intercept routine\n");
        return -1;
    }
    else {
        save_jump(p, pf_do_IRQ);
        patch_jump(p, (char *)rtl_intercept);
    }
    /* insert call to sti */
    *((long *)(pfunc[pf_rtl_emulate_iret].address)) = (long)rtl_soft_sti;
#ifdef CONFIG_X86_LOCAL_APIC
    /* patch the calls to the smp handlers and zap their apic calls */
    for (i = PF_LOCAL_START; i <= PF_LOCAL_END; i++) {
        p = find_patch((ulong)pfunc[i].address);
        if (!p) {
            printk("RTLinux can't smp patch %d\n", i);
            return -1;
        }
        else {
            save_jump(p, i);
            patch_jump(p, (char *)rtl_local_intercept);
            zap_ack_apic(pfunc[i].address);
        }
    }
    init_local_code(pfunc);
#endif
    for (i = 0; i < NR_IRQS; i++) {
        save_linux_irq_desc[i] = h[i].handler;
        rtl_irq_desc[i] = h[i].handler;
        h[i].handler = &rtl_generic_type;   /* RTLinux virtual irq */
    }
    return 0;
}   /* end of patch_kernel */

struct hw_interrupt_type rtl_generic_type = {
    "RTLinux virtual irq",
    rtl_virt_startup,
    rtl_virt_shutdown,
    rtl_virt_enable,
    rtl_virt_disable,
    rtl_virt_ack,
    rtl_virt_set_affinity,
};
intercept_t rtl_intercept(MACHDEPREGS regs)     /* MACHDEPREGS == struct pt_regs */
{
    int irq;
    HardDeclareAndInit(cpu_id);
    rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
    if ((irq = rtl_irq_controller_get_irq(regs)) != -1) {   /* == regs.orig_eax & 0xff */
        rtl_irq_controller_ack(irq);   /* may also mask, if needed;
                                          == if (rtl_irq_desc[irq]) rtl_irq_desc[irq]->ack(irq); */
        if (G_TEST_RTH(irq)) {         /* a real-time handler is installed */
            rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
            dispatch_rtl_handler(irq, MACHDEPREGS_PTR(regs));   /* MACHDEPREGS_PTR(regs) == &regs */
            rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
        }
        else {
            G_PEND(irq);               /* no real-time handler: pend it for Linux */
            G_SET(g_pend_since_sti);
        }

#define RUN_LINUX_HANDLER(irq) (G_ISPEND(irq) && !L_TEST(l_busy) \
        && L_TEST(l_ienable) && G_ISENABLED(irq))

        if (RUN_LINUX_HANDLER(irq)) {
            G_UNPEND(irq);
            rtl_soft_cli();            /* disable local soft interrupts */
            G_DISABLE(irq);            /* soft-disable this irq: if it fires again while the
                                          Linux handler runs, it is pended, not re-entered */
            rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
            rtl_hard_sti();            /* the 80x86 disabled all maskable interrupts when it
                                          transferred control here; turn them back on */
            dispatch_linux_irq(MACHDEPREGS_PTR(regs), irq);     /* == xdo_IRQ(*regs) */
            RETURN_FROM_INTERRUPT_LINUX;   /* goes via ret_from_intr */
        }
    }
    rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
    RETURN_FROM_INTERRUPT;             /* == return */
}   /* end of rtl_intercept */
intercept_t rtl_local_intercept(MACHDEPREGS regs)
{
    int pnd;
    HardDeclareAndInit(cpu_id);
    pnd = MACHDEPREGS_TO_PND(regs);    /* == regs.orig_eax - 0xe8 */
    rtl_local_irq_controller_ack();    /* == ack_APIC_irq(): tell the local APIC that the
                                          processor has accepted the interrupt */
    if (L_TEST_RTH(pnd)) {
        /* == rtl_local[rtl_getcpuid()].rt_handlers[pnd](r) */
        dispatch_rtl_local_handler(pnd, MACHDEPREGS_PTR(regs));
    }
    else {
        L_PEND(pnd);
        L_SET(l_pend_since_sti);
    }

    if (!L_ISPEND(pnd) || L_TEST(l_busy) || !L_TEST(l_ienable)) {
        RETURN_FROM_LOCAL;             /* == RETURN_FROM_INTERRUPT */
    }
    else {
        L_UNPEND(pnd);                 /* yes it is stupid, see above */
        rtl_soft_cli();                /* disable local soft interrupts */
        rtl_hard_sti();
        dispatch_local_linux_irq(MACHDEPREGS_PTR(regs), pnd);
                                       /* == soft_dispatch_local(regs->orig_eax) */
    }
    RETURN_FROM_LOCAL_LINUX;
}   /* end of rtl_local_intercept */

#define RETURN_FROM_LOCAL_LINUX \
    { int i = (int)&regs; \
      __asm__ __volatile__("movl %0,%%esp\n\t" \
                           "jmp *local_ret_from_intr" \
                           : /* no output */ : "c" (i) : "memory"); }
Summary for rtl_intercept

When the CPU receives an interrupt, the stub at the address stored in the
corresponding IDT gate runs:

    push $n-256
    jmp common_interrupt

Original control path of Linux:

    common_interrupt:
        SAVE_ALL
        movl %esp, %eax
        call do_IRQ
        jmp ret_from_intr

Control path of RTLinux (patch_kernel() has redirected the do_IRQ call
to rtl_intercept):

    rtl_intercept(pt_regs)
    {
        if (rtl_irq)
            rtl_global_handlers[irq].handler(irq, r);
        ...
        if (RUN_LINUX_HANDLER(irq)) {
            xdo_IRQ(*regs);
            return;
        }
    }
void rtl_process_pending(void)
{
    int irq = 0;
    int last_irq = 0;
    DeclareAndInit(cpu_id);
    rtl_soft_cli();                    /* disable soft interrupts! */
    do {
        irq = IRQ_NOT_VALID;
        G_CLEAR(g_pend_since_sti);
        L_CLEAR(l_pend_since_sti);
#ifdef __LOCAL_IRQS__
        while ((irq = get_lpended_irq()) != IRQ_NOT_VALID) {
            soft_dispatch_local(irq);
        }
#endif
#ifdef __RTL_LOCALIRQS__
        if (!test_bit(cpu_id, &rtl_reserved_cpumask))
#endif
        {
            while ((irq = get_gpended_irq()) != IRQ_NOT_VALID) {
                last_irq = irq;
                soft_dispatch_global(irq);
            }
        }
#ifdef __RTL_LOCALIRQS__
    } while (irq != IRQ_NOT_VALID
             || (!test_bit(cpu_id, &rtl_reserved_cpumask) && G_TEST(g_pend_since_sti))
             || L_TEST(l_pend_since_sti));
#else
    } while (irq != IRQ_NOT_VALID || G_TEST(g_pend_since_sti) || L_TEST(l_pend_since_sti));
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
    if ( softirq_active(cpu_id) & softirq_mask(cpu_id) )
        do_softirq();                  /* invoked so the Linux kernel takes care of pending softirqs */
#endif
}
void cleanup_module(void)
{
    HardDeclareAndInit(cpu_id);
    rtl_printf_cleanup();
    rtl_hard_cli();
    rtl_soft_sti_no_emulation();
    do {
        rtl_hard_sti();
        rtl_process_pending();
        rtl_hard_cli();
    } while ( G_TEST(g_pend_since_sti) || L_TEST(l_pend_since_sti) );
    arch_giveup();
    rtlinux_suspend_linux_cleanup();
    rtl_hard_sti();
}
#define G_TEST_RTH(f) test_bit(irq_top(f), &rtl_global.rtirq[irq_toi(f)])
#define G_SET_RTH(f)  set_bit(irq_top(f), &rtl_global.rtirq[irq_toi(f)])

int rtl_request_global_irq(unsigned int irq,
                           unsigned int (*handler)(unsigned int, struct pt_regs *))
{
    if (!G_TEST_RTH(irq)) {
        rtl_global_handlers[irq].handler = handler;
        G_SET_RTH(irq);
        mb();
        if (rtl_global_handlers[irq].handler == handler) {
            rtl_hard_enable_irq(irq);
            return 0;
        }
    }
    return -EBUSY;
}

int rtl_free_global_irq(unsigned int irq);
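As a usage example, a real-time module installs its handler through this API. The sketch below is hypothetical: the IRQ number, the handler body, and the header name are assumptions; only rtl_request_global_irq(), rtl_free_global_irq() and rtl_hard_enable_irq() are taken from these slides.

/* Hypothetical example module: grab IRQ 7 for a real-time handler. */
#include <rtl_core.h>          /* assumed header for the API above */
#include <linux/module.h>

#define MY_IRQ 7               /* hypothetical: parallel-port IRQ */

static unsigned int my_rt_handler(unsigned int irq, struct pt_regs *regs)
{
    /* hard real-time work goes here; Linux cannot delay it */
    rtl_hard_enable_irq(irq);  /* re-enable: the ack may have masked the line */
    return 0;
}

int init_module(void)
{
    return rtl_request_global_irq(MY_IRQ, my_rt_handler);
}

void cleanup_module(void)
{
    rtl_free_global_irq(MY_IRQ);
}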
void rtl_hard_enable_irq(unsigned int ix)
{
    rtl_irqstate_t flags;
    rtl_no_interrupts(flags);
    rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
    rtl_irq_controller_enable(ix);
    rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
    rtl_restore_interrupts(flags);
}

static inline void rtl_irq_controller_enable(unsigned int irq)
{
    if (rtl_irq_desc[irq])
        rtl_irq_desc[irq]->enable(irq);
}
int rtl_free_global_irq(unsigned int irq)
{
    if (!G_TEST_AND_CLEAR_RTH(irq))
        return -EINVAL;
    return 0;
    /* don't need to clear the handler, because it will never be
       invoked -- see rtl_intercept. If we wanted to clear the handler
       we would have a problem with synchronization in the smp case */
}
#define DeclareAndInit(cpu_id) unsigned int cpu_id = rtl_getcpuid()

#ifdef CONFIG_SMP
#define rtl_getcpuid() hw_smp_processor_id()
#else
#define rtl_getcpuid() smp_processor_id()   /* reads the cpu field of the thread_info structure */
#endif

#define L_TEST(f) test_bit(f, &rtl_local[cpu_id].flags)
#define G_TEST(f) test_bit(f, &rtl_global.flags)
#define L_SET(f)  set_bit(f, &rtl_local[cpu_id].flags)
static void save_jump(char *p, enum pfunctions pf)
{
    int i;
    if (pf > MAX_JUMPS) {
        patch_failure("RTLinux FATAL ERROR; too many jumps\n");
    }
    else
        for (i = 0; i < 5; i++)
            saved_jumps[pf][i] = p[i];
}

static void patch_jump(char *code_to_patch, char *target)
{
    int distance;
    distance = (int)target - ((int)code_to_patch + 5);
    *code_to_patch++ = 0xe9;   /* opcode: jump to near position (relative address) */
    *code_to_patch++ = ((char *)&distance)[0];
    *code_to_patch++ = ((char *)&distance)[1];
    *code_to_patch++ = ((char *)&distance)[2];
    *code_to_patch   = ((char *)&distance)[3];
}
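A worked example of the displacement arithmetic in patch_jump(), using made-up addresses: the rel32 operand of the 0xe9 near jump is relative to the end of the 5-byte instruction, which is where the "+ 5" comes from.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t code_to_patch = 0xc0106bb0;  /* hypothetical: site of "call do_IRQ" */
    uint32_t target        = 0xc8831000;  /* hypothetical: rtl_intercept */

    /* distance is measured from the first byte AFTER the 5-byte jmp */
    int32_t distance = (int32_t)(target - (code_to_patch + 5));

    /* the 5 patched bytes: e9, then distance in little-endian order */
    printf("jmp rel32: e9 %02x %02x %02x %02x\n",
           (uint8_t)distance,         (uint8_t)(distance >> 8),
           (uint8_t)(distance >> 16), (uint8_t)(distance >> 24));
    return 0;
}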
static void zap_ack_apic(char *apic_caller)
{
    int i;
    int ack_len = (int)end_apic_ack - (int)start_apic_ack;
    /* find the ack_APIC_irq() call closest to the local interrupt handler */
    char *call_point = match_ack(apic_caller);
    if (call_point) {
        /* zap the closest ack: overwrite it with NOP (0x90) bytes */
        for (i = 0; i < ack_len; i++)
            call_point[i] = 0x90;
    }
}

The ack is removed because rtl_local_intercept already acknowledges the
local APIC (tells it the processor has accepted the interrupt) before the
Linux handler runs, and the same interrupt must not be acknowledged twice.
void init_local_code(const struct func_table *pf)
{
#ifdef CONFIG_SMP
    local_code.smp_reschedule_interrupt =
        (void *)pf[pf_smp_reschedule_interrupt].address;
    local_code.smp_invalidate_interrupt =
        (void *)pf[pf_smp_invalidate_interrupt].address;
    local_code.smp_call_function_interrupt =
        (void *)pf[pf_smp_call_function_interrupt].address;
    local_code.rtl_reschedule =
        (void *)pf[pf_rtl_reschedule].address;
#endif
    /* local APIC interrupt handlers */
    local_code.smp_spurious_interrupt =
        (void *)pf[pf_smp_spurious_interrupt].address;
    local_code.smp_error_interrupt =
        (void *)pf[pf_smp_error_interrupt].address;
    local_code.smp_apic_timer_interrupt =
        (void *)pf[pf_smp_apic_timer_interrupt].address;
}
void rtl_virt_shutdown(unsigned int irq)
{
    G_DISABLE(irq);
    rtl_irq_desc[irq]->shutdown(irq);
}

unsigned int rtl_virt_startup(unsigned int irq)
{
    G_ENABLE(irq);
    return rtl_irq_desc[irq]->startup(irq);
}

static void rtl_virt_ack(unsigned int irq) { return; }

void rtl_virt_disable(unsigned int irq)
{
    G_DISABLE(irq);
}
/* Interrupt controller descriptor. This is all we need
   to describe about the low-level hardware. */
struct hw_interrupt_type {
    const char *typename;
    void (*startup)(unsigned int irq);
    void (*shutdown)(unsigned int irq);
    void (*handle)(unsigned int irq, struct pt_regs *regs);
    void (*enable)(unsigned int irq);
    void (*disable)(unsigned int irq);
};
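The swap of h[i].handler in patch_kernel() works because Linux never programs the interrupt controller directly: enable_irq()/disable_irq() always dispatch through these function pointers. The userspace sketch below (all names hypothetical) shows the idea: after the swap, a Linux "enable" only flips a soft bit while RTLinux keeps control of the real controller.

#include <stdio.h>

struct hw_interrupt_type_sketch {
    const char *typename;
    void (*enable)(unsigned int irq);
    void (*disable)(unsigned int irq);
};

static void pic_enable(unsigned int irq)  { printf("PIC: unmask %u\n", irq); }
static void pic_disable(unsigned int irq) { printf("PIC: mask %u\n", irq); }
static struct hw_interrupt_type_sketch pic = { "real PIC", pic_enable, pic_disable };

static unsigned long soft_enabled_bits;
static void virt_enable(unsigned int irq)  { soft_enabled_bits |=  (1UL << irq); }
static void virt_disable(unsigned int irq) { soft_enabled_bits &= ~(1UL << irq); }
static struct hw_interrupt_type_sketch virt = { "virtual irq", virt_enable, virt_disable };

/* what Linux's enable_irq() boils down to */
static struct hw_interrupt_type_sketch *handler = &pic;
static void enable_irq(unsigned int irq) { handler->enable(irq); }

int main(void)
{
    enable_irq(4);       /* before takeover: really unmasks the PIC */
    handler = &virt;     /* the patch_kernel() swap */
    enable_irq(4);       /* after takeover: only sets a soft bit */
    printf("soft bits: %lx\n", soft_enabled_bits);
    return 0;
}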
#define G_TEST_RTH(f) test_bit(irq_top(f), &rtl_global.rtirq[irq_toi(f)])
#define G_PEND(f)     set_bit(irq_top(f), &rtl_global.pending[irq_toi(f)])
#define G_SET(f)      set_bit(f, &rtl_global.flags)
#define dispatch_rtl_handler(irq, r) rtl_global_handlers[irq].handler(irq, r)
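irq_toi() and irq_top() are not defined on these slides. A plausible sketch, assuming 32-bit words and that the pending/rtirq arrays span IRQ_MAX_COUNT (256) lines, is a word index plus a bit position:

#define irq_toi(f) ((f) >> 5)     /* which unsigned long in the array holds irq f */
#define irq_top(f) ((f) & 0x1f)   /* which bit inside that word */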
#define RETURN_FROM_INTERRUPT \
    { int i = (int)&regs; \
      __asm__ __volatile__("movl %0,%%esp\n\t" \
                           RESTORE_ALL : /* no output */ : "c" (i) : "memory"); }

#define RESTORE_ALL \
    "popl %%ebx;\n\t" \
    "popl %%ecx;\n\t" \
    "popl %%edx;\n\t" \
    "popl %%esi;\n\t" \
    "popl %%edi;\n\t" \
    "popl %%ebp;\n\t" \
    "popl %%eax;\n\t" \
    "1: popl %%ds;\n\t" \
    "2: popl %%es;\n\t" \
    "addl $4,%%esp;\n\t" \
    "3: iret;\n\t"

After handling an interrupt, the Linux return path goes via ret_from_intr;
the RTLinux return simply pops the saved registers and irets directly from
the intercept.
#define localdbg() do {;} while (0)

void soft_dispatch_local(unsigned int vector)
{
    rtl_soft_cli();
    switch (vector) {
    case RESCHEDULE_VECTOR:
        localdbg();
        local_code.smp_reschedule_interrupt();
        break;
    case INVALIDATE_TLB_VECTOR:        /* IPI for invalidation */
        localdbg();
        local_code.smp_invalidate_interrupt();
        break;
    case CALL_FUNCTION_VECTOR:
        localdbg();
        local_code.smp_call_function_interrupt();
        break;
    case SPURIOUS_APIC_VECTOR:
        localdbg();
        local_code.smp_spurious_interrupt();
        break;
    case ERROR_APIC_VECTOR:
        localdbg();
        local_code.smp_error_interrupt();
        break;
    case LOCAL_TIMER_VECTOR: {
        struct pt_regs r = FAKE_REGS;
        localdbg();
        local_code.smp_apic_timer_interrupt(&r);
        break;
    }
    default:
        printk("RTL: bad local vector %x\n", vector);
        break;
    }
}
struct pt_regs {
    long ebx;        /* ebx .. xes: pushed by the SAVE_ALL macro in Linux
                        (ebx is at the top of the stack) */
    long ecx;
    long edx;
    long esi;
    long edi;
    long ebp;
    long eax;
    int  xds;
    int  xes;
    /* int xfs; int xgs; */
    long orig_eax;   /* irq value, pushed by the IDT gate stub (push $n-256) */
    long eip;        /* eip .. xss: pushed by the CPU control unit;
                        the start of this frame */
    int  xcs;
    long eflags;
    long esp;
    int  xss;
};