Simple Example

The following example shows how to use the UMS library with one UMS scheduler per CPU core, each scheduler managing its own ready queue.

// SPDX-License-Identifier: AGPL-3.0-only

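/*
 * global.h is assumed to provide the configure-time HAVE_* macros
 * (and any required feature-test macros); list.h provides a
 * Linux-style intrusive linked list.
 */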
#include "global.h"
#include "list.h"

#include <ums.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>
#include <pthread.h>

#ifdef HAVE_SCHED_H
#include <sched.h>
#endif

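/* A ready-queue entry wrapping a single UMS worker context. */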
struct context_list_node {
	ums_context_t context;
	struct list_head list;
};

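/* Per-scheduler ready queue of contexts that can be executed. */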
struct ums_sched_rq {
	struct list_head head;
};

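/*
 * The completion list is shared by all schedulers; each scheduler
 * thread keeps its own thread-local ready queue.
 */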
ums_completion_list_t comp_list;
__thread struct ums_sched_rq rq;

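/*
 * Pop the next runnable context. If the local ready queue is empty,
 * block on the completion list and move the whole dequeued chain of
 * contexts into the ready queue before popping its head.
 */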
static struct context_list_node *get_next_context(void)
{
	ums_context_t context;
	struct context_list_node *node;

	if (!list_empty(&rq.head)) {
		node = list_first_entry(&rq.head,
					struct context_list_node,
					list);
		list_del(&node->list);
		return node;
	}

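	/* Retry the blocking dequeue if it is interrupted by a signal. */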
	while (dequeue_ums_completion_list_items(comp_list, &context)) {
		if (errno == EINTR)
			continue;
		else
			return NULL;
	}

	node = malloc(sizeof(*node));
	if (!node)
		return NULL;
	node->context = context;

	list_add_tail(&node->list, &rq.head);

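	/* Drain the rest of the dequeued chain into the ready queue. */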
	while ((context = get_next_ums_list_item(context)) > 0) {
		node = malloc(sizeof(*node));
		if (!node)
			return NULL;
		node->context = context;

		list_add_tail(&node->list, &rq.head);
	}

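	/* Hand back the first context that was queued. */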
	node = list_first_entry(&rq.head,
				struct context_list_node,
				list);
	list_del(&node->list);

	return node;
}

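/* Run the next ready worker on this scheduler, if any. */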
static inline void execute_next_context(void)
{
	struct context_list_node *node;

	node = get_next_context();
	if (!node) {
		perror("get_next_context");
		return;
	}

	if (execute_ums_thread(node->context))
		perror("execute_ums_thread");

	free(node);
}

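/*
 * Scheduler entry point, invoked by the library when the scheduler
 * starts and every time one of its workers yields or terminates.
 */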
static void sched_entry_proc(ums_reason_t reason,
			     ums_activation_t *activation,
			     void *args)
{
	ums_context_t context;
	long worker_result;

	switch (reason) {
	case UMS_SCHEDULER_STARTUP:
		execute_next_context();
		break;
	case UMS_SCHEDULER_THREAD_YIELD:
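		/* args is the pointer the worker passed to ums_thread_yield(). */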
		context = activation->context;
		worker_result = *((long *) args);

		printf("worker %d yielded with value %ld\n",
		       context,
		       worker_result);
		fflush(stdout);

		free(args);

		execute_next_context();
		break;
	case UMS_SCHEDULER_THREAD_END:
		execute_next_context();
		break;
	default:
		break;
	}
}

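/*
 * Body of each scheduler pthread: set up the thread-local ready
 * queue, pin the thread to its CPU when the attribute-based API is
 * not available, and convert the pthread into a UMS scheduler.
 */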
static void *sched_pthread_proc(void *arg)
{
	ums_scheduler_startup_info_t sched_info;

	sched_info.completion_list = comp_list;
	sched_info.ums_scheduler_entry_point = sched_entry_proc;

	INIT_LIST_HEAD(&rq.head);

#if !HAVE_DECL_PTHREAD_ATTR_SETAFFINITY_NP && defined(HAVE_SCHED_SETAFFINITY)
	(void) sched_setaffinity(0, sizeof(cpu_set_t), arg);
	free(arg);
#endif

	if (enter_ums_scheduling_mode(&sched_info))
		perror("enter_ums_scheduling_mode");

	return NULL;
}

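/*
 * Create the shared completion list and spawn one detached scheduler
 * thread per CPU core, each pinned to its own core.
 */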
int initialize_ums_scheduling(pthread_t *sched_threads,
			      long nthreads)
{
	pthread_attr_t attr;
	cpu_set_t cpus, *cpus_arg = NULL;
	long i;

	if (pthread_attr_init(&attr))
		return -1;

	if (create_ums_completion_list(&comp_list))
		return -1;

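	/* One scheduler thread per core, pinned to CPU i. */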
	for (i = 0L; i < nthreads; i++) {
		CPU_ZERO(&cpus);
		CPU_SET(i, &cpus);
		if (pthread_attr_setdetachstate(&attr,
						PTHREAD_CREATE_DETACHED))
			goto out;
#if HAVE_DECL_PTHREAD_ATTR_SETAFFINITY_NP
		if (pthread_attr_setaffinity_np(&attr,
						sizeof(cpu_set_t),
						&cpus))
			goto out;

#else /* !HAVE_DECL_PTHREAD_ATTR_SETAFFINITY_NP */
#ifdef HAVE_SCHED_SETAFFINITY
		cpus_arg = malloc(sizeof(*cpus_arg));
		if (!cpus_arg)
			goto out;
		*cpus_arg = cpus;
#endif
#endif /* !HAVE_DECL_PTHREAD_ATTR_SETAFFINITY_NP */
		if (pthread_create(sched_threads + i,
				   &attr,
				   sched_pthread_proc,
				   cpus_arg))
#if !HAVE_DECL_PTHREAD_ATTR_SETAFFINITY_NP && defined(HAVE_SCHED_SETAFFINITY)
			goto sched_thread_create;
#else
			goto out;
#endif
	}

	return 0;

sched_thread_create:
	free(cpus_arg);
out:
	delete_ums_completion_list(&comp_list);
	return -1;
}

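/*
 * Scheduler threads are detached, so tearing down only has to delete
 * the shared completion list.
 */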
int release_ums_scheduling(pthread_t *sched_threads,
			   long nthreads)
{
	return delete_ums_completion_list(&comp_list);
}

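/* Create a UMS worker thread bound to the shared completion list. */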
int create_ums_worker_thread(pthread_t *thread, void *(*func)(void *),
			     void *arg)
{
	ums_attr_t attr;

	attr.completion_list = comp_list;
	attr.pthread_attr = NULL;

	return ums_pthread_create(thread, &attr, func, arg);
}

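/*
 * Worker body: yield back to the scheduler once with a heap-allocated
 * result, which the scheduler prints and frees, then terminate.
 */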
static void *worker_pthread_proc(void *arg)
{
	long *result;

	result = malloc(sizeof(*result));
	if (!result)
		return NULL;

	*result = (long) (intptr_t) arg;

	if (ums_thread_yield(result))
		perror("ums_thread_yield");

	return NULL;
}

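/* Spawn one scheduler per online CPU and 24 workers per scheduler. */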
int main(int argc, char **argv)
{
	long			nproc = sysconf(_SC_NPROCESSORS_ONLN);
	pthread_t		sched_threads[nproc];
	long			nworkers = 24L * nproc;
	pthread_t		workers[nworkers];
	long			i;

	if (initialize_ums_scheduling(sched_threads, nproc)) {
		perror("initialize_ums_scheduling");
		return 1;
	}

	for (i = 0L; i < nworkers; i++) {
		if (create_ums_worker_thread(workers + i,
					     worker_pthread_proc,
					     (void *) (intptr_t) i))
			perror("create_ums_worker_thread");
	}

	for (i = 0L; i < nworkers; i++)
		pthread_join(workers[i], NULL);

	if (release_ums_scheduling(sched_threads, nproc)) {
		perror("release_ums_scheduling");
		return 1;
	}

	return 0;
}
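
Assuming the library installs <ums.h> and a libums shared object, the
example can be built with something along the lines of:

	cc -o example example.c -lums -lpthread

Each worker yields once, so a run prints one line per worker of the
form "worker <context> yielded with value <i>", where <context> is the
UMS context identifier and <i> is the index passed at creation time.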