cpu.h
/*
 * Copyright (C) 2014, Freie Universitaet Berlin (FUB) & INRIA.
 * All rights reserved.
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 */

#pragma once

#include <stdint.h>

#include <msp430.h>

#include "sched.h"
#include "thread.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief   Word size in bits for MSP430 platforms
 */
#define WORDSIZE 16

/**
 * @brief   This CPU provides its own pm_set_lowest() implementation
 */
#define PROVIDES_PM_SET_LOWEST

/**
 * @brief   Macro for defining interrupt service routines
 */
#define ISR(a,b) void __attribute__((naked, interrupt (a))) b(void)

/**
 * @brief   The current ISR state (inside or not)
 */
extern volatile int __irq_is_in;

/**
 * @brief   Save the current thread context from inside an ISR
 */
static inline void __attribute__((always_inline)) __save_context(void)
{
    __asm__("push r15");
    __asm__("push r14");
    __asm__("push r13");
    __asm__("push r12");
    __asm__("push r11");
    __asm__("push r10");
    __asm__("push r9");
    __asm__("push r8");
    __asm__("push r7");
    __asm__("push r6");
    __asm__("push r5");
    __asm__("push r4");

    /* r1 is the stack pointer on MSP430: store it in the thread's TCB */
    __asm__("mov.w r1,%0" : "=r"(thread_get_active()->sp));
}

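/*
 * Stack layout after __save_context(), from the stored stack pointer
 * upwards: r4..r15, then the status register and program counter that
 * the MSP430 hardware pushed on interrupt entry. __restore_context()
 * below unwinds exactly this layout before executing reti.
 */
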
/**
 * @brief   Restore the thread context from inside an ISR
 */
static inline void __attribute__((always_inline)) __restore_context(void)
{
    /* load the stored stack pointer of the now active thread into r1 */
    __asm__("mov.w %0,r1" : : "m"(thread_get_active()->sp));

    __asm__("pop r4");
    __asm__("pop r5");
    __asm__("pop r6");
    __asm__("pop r7");
    __asm__("pop r8");
    __asm__("pop r9");
    __asm__("pop r10");
    __asm__("pop r11");
    __asm__("pop r12");
    __asm__("pop r13");
    __asm__("pop r14");
    __asm__("pop r15");
    /* return from interrupt: hardware pops the status register and PC */
    __asm__("reti");
}

/**
 * @brief   Run this code on entering interrupt routines
 */
static inline void __attribute__((always_inline)) __enter_isr(void)
{
    /* modify the state register pushed to the stack so that the CPU does
     * not go right back into power saving mode on return */
    __asm__ volatile(
        "bic %[mask], 0(SP)" "\n\t"
        : /* no outputs */
        : [mask] "i"(CPUOFF | SCG0 | SCG1 | OSCOFF)
        : "memory"
    );
    extern char __stack; /* defined by the linker script to the end of RAM */
    __save_context();
    /* switch to the ISR stack at the end of RAM (r1 is the stack pointer) */
    __asm__("mov.w %0,r1" : : "i"(&__stack));
    __irq_is_in = 1;
}

/**
 * @brief   Run this code on exiting interrupt routines
 */
static inline void __attribute__((always_inline)) __exit_isr(void)
{
    __irq_is_in = 0;

    if (sched_context_switch_request) {
        sched_run();
    }

    __restore_context();
}

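/*
 * Usage sketch for the helpers above (illustrative only: TIMERA0_VECTOR,
 * isr_timer and timer_handler are placeholders, not names from this file):
 *
 *  ISR(TIMERA0_VECTOR, isr_timer)
 *  {
 *      __enter_isr();
 *      timer_handler();
 *      __exit_isr();
 *  }
 *
 * The handler may set sched_context_switch_request; __exit_isr() then does
 * not return to its caller, but runs the scheduler and restores the context
 * of whichever thread is active afterwards.
 */
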
/**
 * @brief   Get the address the current function will return to
 */
__attribute__((always_inline))
static inline uintptr_t cpu_get_caller_pc(void)
{
    return (uintptr_t)__builtin_return_address(0);
}

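/*
 * Usage sketch (illustrative only): cpu_get_caller_pc() can, for example,
 * be used in debug output to record where a function was entered from:
 *
 *  printf("called from %p\n", (void *)cpu_get_caller_pc());
 */
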
#ifdef __cplusplus
}
#endif