# sched_demo_311551147 code explanation
## Code
```cpp=
#include <bits/stdc++.h>
#include <unistd.h>
#include <sched.h>
#include <pthread.h>
#include <time.h>
#include <chrono>
using namespace std;
// Forward declarations for the argument-parsing helpers defined after main().
vector<string> parsePolicies(string);
vector<int> parsePriorities(string);
vector<string> split(string, char);
// Entry point executed by every worker pthread.
void *threadRoutine(void *args);
// Rendezvous point: main and all workers wait here so the workers start together.
pthread_barrier_t barrier;
// Affinity mask holding the single CPU that the process and all threads run on.
cpu_set_t cpuSet;
int cpuNum = 0; // index of the one CPU everything is pinned to
// Per-thread bundle handed to threadRoutine via pthread_create's arg pointer.
typedef struct {
pthread_t* thread_id; // points into main's threadIds vector
int thread_num; // 0-based thread index, printed in the progress message
string sched_policy; // "FIFO" or "NORMAL"
int sched_priority; // scheduling priority (meaningful for SCHED_FIFO)
} thread_info_t;
float busyTime; // -t: seconds of busy-waiting per burst (3 bursts per thread)
/*
 * Parse -n/-t/-s/-p, pin the process to one CPU, create the worker threads
 * with their requested scheduling policy/priority, release them all at once
 * through the barrier, then join them.
 */
int main(int argc, char* argv[]) {
    int option = 0;
    int threads = 0;                 // -n: number of worker threads (0 until parsed)
    vector<string> policies;         // -s: per-thread policy, "FIFO" or "NORMAL"
    vector<int> priorities;          // -p: per-thread priority
    // Pin the whole process to a single core so every thread competes for the
    // same CPU and the effect of the scheduling policies is observable.
    CPU_ZERO(&cpuSet);
    CPU_SET(cpuNum, &cpuSet);
    sched_setaffinity(getpid(), sizeof(cpuSet), &cpuSet);
    // getopt() returns -1 once all options are consumed.
    while ((option = getopt(argc, argv, "n:t:s:p:")) != -1) {
        switch (option) {
        case 'n':
            threads = stoi(optarg);
            break;
        case 't':
            busyTime = stof(optarg);
            break;
        case 's':
            policies = parsePolicies(optarg);
            break;
        case 'p':
            priorities = parsePriorities(optarg);
            break; // was missing: silently fell through to default
        default:
            break;
        }
    }
    // Guard against missing/mismatched arguments instead of indexing
    // policies[i]/priorities[i] out of range below.
    if (threads <= 0 || (int)policies.size() < threads || (int)priorities.size() < threads) {
        cerr << "usage: -n <threads> -t <secs> -s <policy,...> -p <priority,...>" << endl;
        return 1;
    }
    // threads + 1 parties: the extra one is main itself, so every worker
    // blocks in threadRoutine until main's pthread_barrier_wait below.
    pthread_barrier_init(&barrier, nullptr, threads + 1);
    vector<pthread_t> threadIds(threads);
    // Attributes held by value: no per-thread heap allocation to leak.
    vector<pthread_attr_t> threadAttr(threads);
    for (int i = 0; i < threads; i++) {
        // Heap-allocated because it must outlive this loop iteration; the
        // worker thread owns it after pthread_create().
        thread_info_t* threadInfo = new thread_info_t {
            &threadIds[i],
            i,
            policies[i],
            priorities[i],
        };
        pthread_attr_init(&threadAttr[i]);
        // Without EXPLICIT_SCHED the attribute's policy/priority are ignored
        // and the new thread would inherit main's scheduling.
        pthread_attr_setinheritsched(&threadAttr[i], PTHREAD_EXPLICIT_SCHED);
        if (policies[i] == "FIFO") {
            pthread_attr_setschedpolicy(&threadAttr[i], SCHED_FIFO);
        } else {
            pthread_attr_setschedpolicy(&threadAttr[i], SCHED_OTHER);
        }
        // pthread_attr_setschedparam() copies the struct, so a stack local is
        // sufficient (the original leaked one `new sched_param` per thread).
        sched_param params;
        params.sched_priority = priorities[i];
        pthread_attr_setschedparam(&threadAttr[i], &params);
        // Check the result: SCHED_FIFO creation fails with EPERM when not
        // privileged, and joining an unset pthread_t would be undefined.
        if (pthread_create(&threadIds[i], &threadAttr[i], threadRoutine, (void*)threadInfo) != 0) {
            cerr << "pthread_create failed for thread " << i << endl;
            return 1;
        }
    }
    // Main arrives last at the barrier: all workers start together here.
    pthread_barrier_wait(&barrier);
    // Wait for every worker, then release its attribute object.
    for (int i = 0; i < threads; i++) {
        pthread_join(threadIds[i], NULL);
        pthread_attr_destroy(&threadAttr[i]);
    }
    pthread_barrier_destroy(&barrier);
    return 0;
}
// Turn the comma-separated -s argument into one policy string per thread.
vector<string> parsePolicies(string optarg) {
    return split(optarg, ',');
}
// Turn the comma-separated -p argument into one integer priority per thread.
// stoi() throws (as in the original) if a token is empty or non-numeric.
vector<int> parsePriorities(string optarg) {
    vector<int> result;
    for (const string& token : split(optarg, ',')) {
        result.push_back(stoi(token));
    }
    return result;
}
// Split `optarg` on `delim`. Delimiters separate tokens, so an empty input
// yields one empty token and a trailing delimiter yields a trailing empty
// token — identical to accumulating characters up to a sentinel delimiter.
vector<string> split(string optarg, char delim) {
    vector<string> pieces;
    size_t begin = 0;
    size_t pos;
    while ((pos = optarg.find(delim, begin)) != string::npos) {
        pieces.push_back(optarg.substr(begin, pos - begin));
        begin = pos + 1;
    }
    pieces.push_back(optarg.substr(begin));
    return pieces;
}
void *threadRoutine(void* args) {
// wait until all threads are ready
thread_info_t* info = (thread_info_t*)args;
int id = (info -> thread_num);
string policy = (info -> sched_policy);
pthread_setaffinity_np(*(info -> thread_id), sizeof(cpuSet), &cpuSet);
pthread_barrier_wait(&barrier);
for(int i = 0; i < 3; i++) {
auto start = chrono::system_clock::now();
cout << "Thread " << id << " is running" << endl;
// busy working for t secs
while(true) {
auto end = chrono::system_clock::now();
chrono::duration<float> passed = end - start;
if(passed.count() >= busyTime){
break;
}
}
// if the current pthread has the policy "NORMAL", then it should let go of the cpu
if(policy == "NORMAL") {
sched_yield();
}
}
return nullptr;
}
```
## Explanation
### CPU affinity
在這次作業的實作過程中首先透過
```cpp=
CPU_ZERO(&cpuSet);
CPU_SET(cpuNum, &cpuSet);
sched_setaffinity(getpid(), sizeof(cpuSet), &cpuSet);
```
將process的affinity設定在同一個core上執行,在之後每一個pthread執行前也會執行同樣的動作確保所有pthread都是在同一個core上執行
### Read command line arguments
接著,透過getopt的方式讀取有幾個thread、對應的priority等等的資訊
```cpp=
while(option = getopt(argc, argv, "n:t:s:p:")){
if(option == -1) {
break;
}
// extern char *optarg
switch(option) {
case 'n':
threads = stoi(optarg);
break;
case 't':
busyTime = stof(optarg);
break;
case 's':
policies = parsePolicies(optarg);
break;
case 'p':
priorities = parsePriorities(optarg);
default:
break;
}
}
```
### pthread_barrier_init
在讀取完資訊後便是建立相對應的pthread,不過在這之前為了避免有些pthread一被建立就馬上執行,而導致無法看出schedule policy的影響,所以在建立pthread之前先執行了`pthread_barrier_init(&barrier, nullptr, threads + 1);`,這一步設定了`threads + 1`個參與者的barrier(多出來的1個是main自己),因此,只有當pthread全數準備好之後,並且由main在建立完所有pthread之後call最後一次`pthread_barrier_wait`,所有的pthread才會開始根據schedule policy開始運作。
### pthread creation
而在實際建立pthread的code當中,
```cpp=
for (int i = 0; i < threads; i++) {
thread_info_t* threadInfo = new thread_info_t {
&threadIds[i],
i,
policies[i],
priorities[i],
};
threadAttr[i] = new pthread_attr_t;
pthread_attr_init(threadAttr[i]);
// cout << "here";
pthread_attr_setinheritsched(threadAttr[i], PTHREAD_EXPLICIT_SCHED);
// cout << "here2";
if(policies[i] == "FIFO"){
pthread_attr_setschedpolicy(threadAttr[i], SCHED_FIFO);
}
else {
pthread_attr_setschedpolicy(threadAttr[i], SCHED_OTHER);
}
struct sched_param* params = new sched_param;
params -> sched_priority = priorities[i];
pthread_attr_setschedparam(threadAttr[i], params);
pthread_create(&threadIds[i], threadAttr[i], threadRoutine, (void*)threadInfo);
// the the attr here before the thread start running
// pthread_setaffinity_np(threadIds[i], sizeof(cpuSet), &cpuSet);
}
```
最重要的部分有3個,分別是先透過`pthread_attr_setschedpolicy(threadAttr[i], SCHED_FIFO);`設定該個pthread的policy為FIFO或NORMAL,接著是`pthread_attr_setschedparam(threadAttr[i], params);`設定該pthread的priority
,最後是`pthread_create(&threadIds[i], threadAttr[i], threadRoutine, (void*)threadInfo);`,透過前面兩步所設定的參數真正的create出一個pthread,並且執行`threadRoutine`這個function。
### threadRoutine
```cpp=
void *threadRoutine(void* args) {
// wait until all threads are ready
thread_info_t* info = (thread_info_t*)args;
int id = (info -> thread_num);
string policy = (info -> sched_policy);
pthread_setaffinity_np(*(info -> thread_id), sizeof(cpuSet), &cpuSet);
pthread_barrier_wait(&barrier);
for(int i = 0; i < 3; i++) {
auto start = chrono::system_clock::now();
cout << "Thread " << id << " is running" << endl;
// busy working for t secs
while(true) {
auto end = chrono::system_clock::now();
chrono::duration<float> passed = end - start;
if(passed.count() >= busyTime){
break;
}
}
// if the current pthread has the policy "NORMAL", then it should let go of the cpu
if(policy == "NORMAL") {
sched_yield();
}
}
return nullptr;
}
```
上方為threadRoutine的實作,在line 8先設定該pthread的cpu affinity,這個步驟確定所有pthread都在同一個core上執行。而在第12行開始的for loop當中便是busy working的部分,首先在15行先print出目前正在執行的pthread,第18行則是讀取現在的時間(`end`),如果現在的時間減掉開始busy working的時間(`start`)已經達到busyTime,代表這個pthread已經做完了該次的busy working,那麼就直接break(line 22),並且決定是否要讓出現在的CPU。讓出CPU與否的準則則是用schedule policy決定,若目前的policy是NORMAL那麼就要用`sched_yield`讓出CPU,否則就繼續執行(因為在這次作業裡,當FIFO在執行時,必定是當前priority最高的pthread,所以不須被中斷)。
以上便是這次作業的code explanation