libacfutils
A general purpose library of utility functions designed to make it easier to develop addons for the X-Plane flight simulator.
Loading...
Searching...
No Matches
mt_cairo_render.c
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license in the file COPYING
10 * or http://www.opensource.org/licenses/CDDL-1.0.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file COPYING.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2023 Saso Kiselkov. All rights reserved.
24 */
25
26#include <XPLMGraphics.h>
27#include <XPLMUtilities.h>
28
29#include "acfutils/assert.h"
30#include "acfutils/dr.h"
31#include "acfutils/geom.h"
32#include "acfutils/glctx.h"
33#include "acfutils/glew.h"
34#include "acfutils/list.h"
36#include "acfutils/safe_alloc.h"
37#include "acfutils/glutils.h"
38#include "acfutils/shader.h"
39#include "acfutils/thread.h"
40#include "acfutils/time.h"
41
42#ifdef _USE_MATH_DEFINES
43#undef _USE_MATH_DEFINES
44#endif
45
46#if IBM
47#ifdef __SSE2__
48#undef __SSE2__
49#endif
50#endif /* IBM */
51
52#include <cglm/cglm.h>
53
54TEXSZ_MK_TOKEN(mt_cairo_render_tex);
55TEXSZ_MK_TOKEN(mt_cairo_render_pbo);
56
/* One on-screen vertex of the textured quad used to draw the render. */
typedef struct {
	GLfloat	pos[3];		/* position (x, y, z) in output space */
	GLfloat	tex0[2];	/* texture coordinates (s, t) */
} vtx_t;
61
62typedef struct {
63 cairo_t *cr;
64 cairo_surface_t *surf;
66
68 char *init_filename;
69 int init_line;
70 vect3_t monochrome;
71 bool_t use_ffp;
72
73 mt_cairo_uploader_t *mtul;
74 list_node_t mtul_queue_node;
75
76 unsigned w, h;
77 double fps;
78 mt_cairo_render_cb_t render_cb;
79 mt_cairo_init_cb_t init_cb;
80 mt_cairo_fini_cb_t fini_cb;
81 void *userinfo;
82
83 int render_rs;
84 int present_rs;
85 render_surf_t rs[2];
86 bool_t dirty; /* changed the surface, reupload */
87 bool_t texed; /* has glTexImage2D been applied? */
88 GLuint tex;
89 GLuint pbo;
90 GLint filter;
91 list_node_t ul_inprog_node;
92 GLsync sync;
93 void *coherent_data;
94
95 thread_t thr;
96 condvar_t cv;
97 condvar_t render_done_cv;
98 bool_t one_shot_block;
99 mutex_t lock;
100 bool_t started;
101 bool_t shutdown;
102 bool_t fg_mode;
103
104 /* Only accessed from OpenGL drawing thread, so no locking req'd */
105 struct {
106 double x1, x2, y1, y2;
107 vect2_t pos;
108 vect2_t size;
109 } last_draw;
110 GLuint vao;
111 GLuint vtx_buf;
112 GLuint idx_buf;
113 GLuint shader;
114 bool_t shader_is_custom;
115 GLint shader_loc_pvm;
116 GLint shader_loc_tex;
117 GLint shader_loc_vtx_pos;
118 GLint shader_loc_vtx_tex0;
119 GLint shader_loc_color_in;
120
121 bool_t ctx_checking;
122 glctx_t *create_ctx;
123};
124
126 uint64_t refcnt;
127 glctx_t *ctx;
128 mutex_t lock;
129 condvar_t cv_queue;
130 condvar_t cv_done;
131 list_t queue;
132 bool_t shutdown;
133 thread_t worker;
134};
135
/*
 * Legacy GLSL 1.20 vertex shader (used when GL 4.1 isn't available):
 * transforms vertices by the `pvm' matrix and passes tex coords through.
 */
static const char *vert_shader =
    "#version 120\n"
    "#extension GL_EXT_gpu_shader4 : require\n"
    "uniform mat4 pvm;\n"
    "attribute vec3 vtx_pos;\n"
    "attribute vec2 vtx_tex0;\n"
    "varying vec2 tex_coord;\n"
    "void main() {\n"
    " tex_coord = vtx_tex0;\n"
    " gl_Position = pvm * vec4(vtx_pos, 1.0f);\n"
    "}\n";

/* GLSL 1.20 fragment shader: plain textured output. */
static const char *frag_shader =
    "#version 120\n"
    "uniform sampler2D tex;\n"
    "varying vec2 tex_coord;\n"
    "void main() {\n"
    " gl_FragColor = texture2D(tex, tex_coord);\n"
    "}\n";

/*
 * GLSL 1.20 fragment shader for monochrome mode: the texture's red
 * channel (the A8 cairo surface) becomes alpha, tinted with `color_in'.
 */
static const char *frag_shader_mono =
    "#version 120\n"
    "uniform sampler2D tex;\n"
    "uniform vec3 color_in;\n"
    "varying vec2 tex_coord;\n"
    "void main() {\n"
    " gl_FragColor = vec4(color_in, texture2D(tex, tex_coord).r);\n"
    "}\n";

/* GLSL 4.10 vertex shader: same function as vert_shader above. */
static const char *vert_shader410 =
    "#version 410\n"
    "uniform mat4 pvm;\n"
    "layout(location = 0) in vec3 vtx_pos;\n"
    "layout(location = 1) in vec2 vtx_tex0;\n"
    "layout(location = 0) out vec2 tex_coord;\n"
    "void main() {\n"
    " tex_coord = vtx_tex0;\n"
    " gl_Position = pvm * vec4(vtx_pos, 1.0f);\n"
    "}\n";

/* GLSL 4.10 fragment shader: plain textured output. */
static const char *frag_shader410 =
    "#version 410\n"
    "uniform sampler2D tex;\n"
    "layout(location = 0) in vec2 tex_coord;\n"
    "layout(location = 0) out vec4 color_out;\n"
    "void main() {\n"
    " color_out = texture(tex, tex_coord);\n"
    "}\n";

/* GLSL 4.10 fragment shader for monochrome mode (see frag_shader_mono). */
static const char *frag_shader410_mono =
    "#version 410\n"
    "uniform sampler2D tex;\n"
    "uniform vec3 color_in;\n"
    "layout(location = 0) in vec2 tex_coord;\n"
    "layout(location = 0) out vec4 color_out;\n"
    "void main() {\n"
    " color_out = vec4(color_in, texture(tex, tex_coord).r);\n"
    "}\n";
194
static bool_t glob_inited = B_FALSE;	/* mt_cairo_render_glob_init done? */
/* Use persistently-mapped coherent PBO memory (set in glob_init). */
static bool_t coherent = B_FALSE;
/* ID of the thread which ran mt_cairo_render_glob_init (the GL thread). */
static thread_id_t mtcr_main_thread;

/* X-Plane datarefs, resolved once in mt_cairo_render_glob_init(). */
static struct {
	dr_t	viewport;
	dr_t	proj_matrix;
	dr_t	mv_matrix;
	dr_t	draw_call_type;
} drs;
205
206static void mtcr_gl_formats(const mt_cairo_render_t *mtcr, GLint *intfmt,
207 GLint *format);
208
209static bool_t
210cr_init(mt_cairo_render_t *mtcr, render_surf_t *rs)
211{
212 cairo_format_t cr_fmt;
213
214 ASSERT(mtcr != NULL);
215 ASSERT(rs != NULL);
216
217 cr_fmt = (!IS_NULL_VECT(mtcr->monochrome) ? CAIRO_FORMAT_A8 :
218 CAIRO_FORMAT_ARGB32);
219 rs->surf = cairo_image_surface_create(cr_fmt, mtcr->w, mtcr->h);
220 rs->cr = cairo_create(rs->surf);
221 if (mtcr->init_cb != NULL && !mtcr->init_cb(rs->cr, mtcr->userinfo))
222 goto errout;
223 cairo_set_operator(rs->cr, CAIRO_OPERATOR_CLEAR);
224 cairo_paint(rs->cr);
225 cairo_set_operator(rs->cr, CAIRO_OPERATOR_OVER);
226 return (B_TRUE);
227errout:
228 cairo_destroy(rs->cr);
229 rs->cr = NULL;
230 cairo_surface_destroy(rs->surf);
231 rs->surf = NULL;
232 return (B_FALSE);
233}
234
235static void
236cr_destroy(const mt_cairo_render_t *mtcr, render_surf_t *rs)
237{
238 ASSERT(mtcr != NULL);
239 ASSERT(rs != NULL);
240
241 if (rs->cr == NULL)
242 return;
243 ASSERT(rs->surf != NULL);
244
245 if (mtcr->fini_cb != NULL)
246 mtcr->fini_cb(rs->cr, mtcr->userinfo);
247 cairo_destroy(rs->cr);
248 rs->cr = NULL;
249 cairo_surface_destroy(rs->surf);
250 rs->surf = NULL;
251}
252
253/*
254 * Recalculates the absolute cv_timedwait sleep target based on our framerate.
255 */
256static uint64_t
257recalc_sleep_time(mt_cairo_render_t *mtcr)
258{
259 double fps;
260 ASSERT(mtcr != NULL);
261 mutex_enter(&mtcr->lock);
262 fps = mtcr->fps;
263 mutex_exit(&mtcr->lock);
264 if (fps <= 0)
265 return (0);
266 return (microclock() + SEC2USEC(1.0 / fps));
267}
268
/*
 * Hands a dirty renderer over to the MT uploader's work queue and blocks
 * until the uploader has consumed the new surface data (i.e. until the
 * uploader clears mtcr->dirty). Called from the renderer's worker thread.
 */
static void
mtul_submit_mtcr(mt_cairo_uploader_t *mtul, mt_cairo_render_t *mtcr)
{
	ASSERT(mtul != NULL);
	ASSERT(mtcr != NULL);
	ASSERT(mtcr->dirty);

	mutex_enter(&mtul->lock);
	/* Don't double-queue if we're already awaiting upload. */
	if (!list_link_active(&mtcr->mtul_queue_node)) {
		list_insert_tail(&mtul->queue, mtcr);
		cv_broadcast(&mtul->cv_queue);
	}
	/* Loop guards against spurious wakeups of cv_done. */
	while (mtcr->dirty)
		cv_wait(&mtul->cv_done, &mtul->lock);
	/* render_done_cv will be signalled by the uploader */
	mutex_exit(&mtul->lock);
}
286
287static size_t
288mtcr_get_surf_sz(const mt_cairo_render_t *mtcr)
289{
290 cairo_format_t cr_fmt;
291 ASSERT(mtcr != NULL);
292 cr_fmt = (!IS_NULL_VECT(mtcr->monochrome) ? CAIRO_FORMAT_A8 :
293 CAIRO_FORMAT_ARGB32);
294 return (cairo_format_stride_for_width(cr_fmt, mtcr->w) * mtcr->h);
295}
296
/*
 * Performs a single render pass: invokes the user's render callback on the
 * appropriate cairo surface and publishes the result for presentation.
 * Must be called with mtcr->lock held; the lock is dropped for the
 * duration of the (potentially slow) cairo rendering itself.
 */
static void
worker_render_once(mt_cairo_render_t *mtcr)
{
	render_surf_t *rs;
	mt_cairo_uploader_t *mtul;

	ASSERT(mtcr != NULL);
	ASSERT(mtcr->render_rs != -1);
	ASSERT_MUTEX_HELD(&mtcr->lock);

	if (mtcr->coherent_data != NULL) {
		/*
		 * Coherent-memory mode: only rs[0] exists; finished pixels
		 * are memcpy'd straight into the persistently mapped PBO,
		 * so no separate upload step is needed later.
		 */
		size_t sz = mtcr_get_surf_sz(mtcr);

		rs = &mtcr->rs[0];
		mutex_exit(&mtcr->lock);
		/* Render unlocked - the callback may take a while. */
		mtcr->render_cb(rs->cr, mtcr->w, mtcr->h, mtcr->userinfo);
		cairo_surface_flush(rs->surf);

		mutex_enter(&mtcr->lock);
		memcpy(mtcr->coherent_data,
		    cairo_image_surface_get_data(rs->surf), sz);
		mtcr->dirty = B_TRUE;
		/* Force a glTexImage2D on next draw to pick up new data */
		mtcr->texed = B_FALSE;
		mtcr->present_rs = mtcr->render_rs;
		cv_broadcast(&mtcr->render_done_cv);
	} else {
		/* Double-buffered mode: render into the back surface. */
		ASSERT3S(mtcr->render_rs, >=, 0);
		ASSERT3S(mtcr->render_rs, <, ARRAY_NUM_ELEM(mtcr->rs));
		rs = &mtcr->rs[mtcr->render_rs];
		mutex_exit(&mtcr->lock);

		mtcr->render_cb(rs->cr, mtcr->w, mtcr->h, mtcr->userinfo);
		cairo_surface_flush(rs->surf);

		mutex_enter(&mtcr->lock);
		mtcr->dirty = B_TRUE;

		mtul = mtcr->mtul;
		if (mtul != NULL) {
			ASSERT(!coherent);
			mutex_exit(&mtcr->lock);
			/* render_done_cv will be signalled by the uploader */
			mtul_submit_mtcr(mtul, mtcr);
			mutex_enter(&mtcr->lock);
		} else {
			mtcr->present_rs = mtcr->render_rs;
			cv_broadcast(&mtcr->render_done_cv);
		}
		/* Flip the double buffer for the next pass. */
		mtcr->render_rs = !mtcr->render_rs;
	}
}
349
350/*
351 * Main mt_cairo_render_t worker thread. Simply waits around for the
352 * required interval and fires off the rendering callback. This performs
353 * no canvas clearing between calls, so the callback is responsible for
354 * making sure its output canvas looks right.
355 */
356static void
357worker(void *arg)
358{
359 mt_cairo_render_t *mtcr;
360 char name[32];
361 char shortname[7];
362 uint64_t next_time = 0;
363
364 ASSERT(arg != NULL);
365 mtcr = arg;
366 ASSERT(mtcr->render_cb != NULL);
367
368 strlcpy(shortname, mtcr->init_filename, sizeof (shortname));
369 snprintf(name, sizeof (name), "mtcr:%s:%d", shortname, mtcr->init_line);
370 thread_set_name(name);
371
372 mutex_enter(&mtcr->lock);
373 mtcr->render_rs = 0;
374 /*
375 * Render the first frame immediately to make sure we have something
376 * to show ASAP.
377 */
378 if (mtcr->fps > 0)
379 worker_render_once(mtcr);
380 while (!mtcr->shutdown) {
381 if (!mtcr->one_shot_block) {
382 if (mtcr->fps > 0) {
383 if (next_time == 0) {
384 /*
385 * If we were in fps=0 mode before,
386 * this will be zero. So reset the
387 * timer ahead of time to avoid
388 * rendering two consecutive frames.
389 */
390 next_time = recalc_sleep_time(mtcr);
391 }
392 cv_timedwait(&mtcr->cv, &mtcr->lock, next_time);
393 /*
394 * Recalc the next frame time now to maintain
395 * near as possible constant framerate that
396 * isn't affected by the cairo render time.
397 */
398 next_time = recalc_sleep_time(mtcr);
399 } else {
400 cv_wait(&mtcr->cv, &mtcr->lock);
401 next_time = 0;
402 }
403 }
404 if (mtcr->shutdown)
405 break;
406 worker_render_once(mtcr);
407 }
408 mutex_exit(&mtcr->lock);
409}
410
/*
 * One-time global initialization of the mt_cairo_render machinery.
 * Resolves the X-Plane datarefs used for drawing and decides whether to
 * use coherent (persistently mapped) PBO memory. Subsequent calls are
 * no-ops. Records the calling thread as the "main" (GL) thread.
 * @param want_coherent_mem Request coherent PBO memory (only honored if
 *	ARB_buffer_storage is available; forced on in Zink mode).
 */
void
mt_cairo_render_glob_init(bool_t want_coherent_mem)
{
	if (glob_inited)
		return;
	/*
	 * Create & immediately destroy a 1x1 surface - presumably to run
	 * cairo's internal lazy initialization up front (NOTE: confirm).
	 */
	cairo_surface_destroy(cairo_image_surface_create(CAIRO_FORMAT_ARGB32,
	    1, 1));
	mtcr_main_thread = curthread_id;
	fdr_find(&drs.viewport, "sim/graphics/view/viewport");
	fdr_find(&drs.proj_matrix, "sim/graphics/view/projection_matrix");
	fdr_find(&drs.mv_matrix, "sim/graphics/view/modelview_matrix");
	fdr_find(&drs.draw_call_type, "sim/graphics/view/draw_call_type");
	/*
	 * Important caveat: in Zink mode we MUST utilize coherent memory
	 * (or just avoid using an uploader). The uploader tries to map
	 * a buffer using glMapBuffer while holding the mtcr->lock. This
	 * can cause a deadlock, because Zink's internal architecture seems
	 * to sometimes require main thread progress before a background
	 * thread's buffer mapping request can succeed.
	 */
	coherent = ((want_coherent_mem || glutils_in_zink_mode()) &&
	    GLEW_ARB_buffer_storage);
	glob_inited = B_TRUE;
}
449
450static void
451mtcr_gl_init(mt_cairo_render_t *mtcr)
452{
453 GLint old_vao = 0;
454 bool_t on_main_thread = (curthread_id == mtcr_main_thread);
455 GLint intfmt, gl_fmt;
456
457 ASSERT(mtcr != NULL);
458
459 if (GLEW_VERSION_3_0 && !on_main_thread) {
460 glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &old_vao);
461
462 glGenVertexArrays(1, &mtcr->vao);
463 glBindVertexArray(mtcr->vao);
464 }
465
466 glGenBuffers(1, &mtcr->vtx_buf);
467
468 if (GLEW_VERSION_3_0 && !on_main_thread) {
469 glBindBuffer(GL_ARRAY_BUFFER, mtcr->vtx_buf);
470 glutils_enable_vtx_attr_ptr(VTX_ATTRIB_POS, 3, GL_FLOAT,
471 GL_FALSE, sizeof (vtx_t), offsetof(vtx_t, pos));
472 glutils_enable_vtx_attr_ptr(VTX_ATTRIB_TEX0, 2, GL_FLOAT,
473 GL_FALSE, sizeof (vtx_t), offsetof(vtx_t, tex0));
474 }
475
476 mtcr->idx_buf = glutils_make_quads_IBO(4);
477 if (GLEW_VERSION_3_0 && !on_main_thread)
478 glBindVertexArray(old_vao);
479 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
480
481 ASSERT0(mtcr->pbo);
482 glGenBuffers(1, &mtcr->pbo);
483 ASSERT(mtcr->pbo != 0);
484 mtcr_gl_formats(mtcr, &intfmt, &gl_fmt);
485 IF_TEXSZ(TEXSZ_ALLOC_INSTANCE(mt_cairo_render_pbo,
486 mtcr, mtcr->init_filename, mtcr->init_line, gl_fmt,
487 GL_UNSIGNED_BYTE, mtcr->w, mtcr->h));
488
489 if (coherent) {
490 const GLuint flags = (GL_MAP_WRITE_BIT |
491 GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT);
492 size_t sz = mtcr_get_surf_sz(mtcr);;
493
494 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mtcr->pbo);
495 glBufferStorage(GL_PIXEL_UNPACK_BUFFER, sz, 0, flags);
496 mtcr->coherent_data = glMapBufferRange(GL_PIXEL_UNPACK_BUFFER,
497 0, sz, flags);
498 if (mtcr->coherent_data == NULL) {
499 logMsg("WARNING: cannot grab coherent memory buffer "
500 "with glMapBufferRange(). You may be running out "
501 "of VRAM. Switching to non-coherent mode for this "
502 "renderer (%s:%d).", mtcr->init_filename,
503 mtcr->init_line);
504 }
505 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
506 }
507 glGenTextures(1, &mtcr->tex);
508 ASSERT(mtcr->tex != 0);
509 glBindTexture(GL_TEXTURE_2D, mtcr->tex);
510 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, mtcr->filter);
511 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, mtcr->filter);
512 if (on_main_thread)
513 XPLMBindTexture2d(0, 0);
514
515 IF_TEXSZ(TEXSZ_ALLOC_INSTANCE(mt_cairo_render_tex, mtcr,
516 mtcr->init_filename, mtcr->init_line, gl_fmt,
517 GL_UNSIGNED_BYTE, mtcr->w, mtcr->h));
518 mtcr->texed = B_FALSE;
519}
520
521/*
522 * Frees a texture & PBO previously allocated using rs_tex_alloc. If the
523 * texture or PBO have already been freed, this function does nothing.
524 * The targets of the `tex' and `pbo' arguments are set to 0 after freeing.
525 */
526static void
527mtcr_gl_fini(mt_cairo_render_t *mtcr)
528{
529 GLint intfmt, format;
530
531 ASSERT(mtcr != NULL);
532
533 if (mtcr->vao != 0) {
534 glDeleteVertexArrays(1, &mtcr->vao);
535 mtcr->vao = 0;
536 }
537 if (mtcr->vtx_buf != 0) {
538 glDeleteBuffers(1, &mtcr->vtx_buf);
539 mtcr->vtx_buf = 0;
540 mtcr->last_draw.pos = NULL_VECT2;
541 }
542 if (mtcr->idx_buf != 0) {
543 glDeleteBuffers(1, &mtcr->idx_buf);
544 mtcr->idx_buf = 0;
545 }
546 mtcr_gl_formats(mtcr, &intfmt, &format);
547 if (mtcr->tex != 0) {
548 glDeleteTextures(1, &mtcr->tex);
549 mtcr->tex = 0;
550 IF_TEXSZ(TEXSZ_FREE_INSTANCE(mt_cairo_render_tex, mtcr,
551 format, GL_UNSIGNED_BYTE, mtcr->w, mtcr->h));
552 }
553 if (mtcr->pbo != 0) {
554 if (mtcr->coherent_data) {
555 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mtcr->pbo);
556 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
557 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
558 mtcr->coherent_data = NULL;
559 }
560 glDeleteBuffers(1, &mtcr->pbo);
561 mtcr->pbo = 0;
562 IF_TEXSZ(TEXSZ_FREE_INSTANCE(mt_cairo_render_pbo, mtcr,
563 format, GL_UNSIGNED_BYTE, mtcr->w, mtcr->h));
564 }
565 if (mtcr->sync != NULL) {
566 glDeleteSync(mtcr->sync);
567 mtcr->sync = NULL;
568 }
569 if (mtcr->shader != 0 && !mtcr->shader_is_custom) {
570 glDeleteProgram(mtcr->shader);
571 mtcr->shader = 0;
572 }
573}
574
575/*
576 * Due to a bug in AMD's drivers on Windows (at least as of 23.04.2), when
577 * running in XP12 on top of Zink, we cannot utilize shader rendering,
578 * because texture coordinates get flipped on input to the vertex shader for
579 * the second triangle. So for that specific platform, we need to revert to
580 * the fixed-function pipeline (which doesn't seem to exhibit this bug).
581 * This will only work when using our own built-in shader.
582 */
583static bool_t
584check_use_ffp(const mt_cairo_render_t *mtcr)
585{
586 ASSERT(mtcr != NULL);
587 return (IBM && !mtcr->shader_is_custom &&
588 IS_NULL_VECT(mtcr->monochrome) &&
589 curthread_id == mtcr_main_thread && glutils_in_zink_mode() &&
590 strstr((const char *)glGetString(GL_RENDERER), "Radeon") != NULL);
591}
592
/*
 * Creates a new multi-threaded cairo renderer.
 * @param filename/line Creator's source location (for diagnostics).
 * @param w,h Output surface size in pixels (both non-zero).
 * @param fps Target framerate; 0 = render on demand only.
 * @param init_cb Optional per-surface init callback (may be NULL).
 * @param render_cb Mandatory rendering callback.
 * @param fini_cb Optional per-surface teardown callback (may be NULL).
 * @param userinfo Opaque pointer passed to all callbacks.
 * @return The new renderer, or NULL if a cr_init failed.
 * NOTE(review): the cr_init failure paths below return NULL without any
 * visible cleanup of mtcr and its GL state - confirm against the full
 * source that teardown isn't missing here (possible resource leak).
 */
mt_cairo_render_t *
mt_cairo_render_init_impl(const char *filename, int line,
    unsigned w, unsigned h, double fps, mt_cairo_init_cb_t init_cb,
    mt_cairo_render_cb_t render_cb, mt_cairo_fini_cb_t fini_cb, void *userinfo)
{
	mt_cairo_render_t *mtcr = safe_calloc(1, sizeof (*mtcr));

	ASSERT(w != 0);
	ASSERT(h != 0);
	ASSERT(render_cb != NULL);

	mtcr->init_filename = strdup(filename);
	mtcr->init_line = line;
	mtcr->w = w;
	mtcr->h = h;
	mtcr->render_rs = -1;	/* no surface rendered yet */
	mtcr->present_rs = -1;	/* nothing to present yet */
	mtcr->render_cb = render_cb;
	mtcr->init_cb = init_cb;
	mtcr->fini_cb = fini_cb;
	mtcr->userinfo = userinfo;
	mtcr->fps = fps;
	mtcr->monochrome = NULL_VECT3;
	mtcr->filter = GL_LINEAR;
	mtcr->last_draw.pos = NULL_VECT2;

	mutex_init(&mtcr->lock);
	cv_init(&mtcr->cv);
	cv_init(&mtcr->render_done_cv);

	mtcr_gl_init(mtcr);
	if (!cr_init(mtcr, &mtcr->rs[0])) {
		return (NULL);
	}
	/* Second surface only needed in non-coherent double-buffer mode */
	if (mtcr->coherent_data == NULL) {
		if (!cr_init(mtcr, &mtcr->rs[1])) {
			return (NULL);
		}
	}

	mtcr->create_ctx = glctx_get_current();
	mtcr->use_ffp = check_use_ffp(mtcr);

	VERIFY(thread_create(&mtcr->thr, worker, mtcr));
	mtcr->started = B_TRUE;

	return (mtcr);
}
652
/*
 * Destroys a renderer created by mt_cairo_render_init_impl: stops the
 * worker thread, detaches from any MT uploader, tears down the cairo
 * surfaces and GL state, and frees the renderer itself.
 */
void
mt_cairo_render_fini(mt_cairo_render_t *mtcr)
{
	if (mtcr->started) {
		/* Signal the worker to exit and wait for it */
		mutex_enter(&mtcr->lock);
		mtcr->shutdown = B_TRUE;
		cv_broadcast(&mtcr->cv);
		mutex_exit(&mtcr->lock);
		thread_join(&mtcr->thr);
	}
	if (mtcr->mtul != NULL) {
		/* Drop our uploader reference and dequeue if pending */
		mutex_enter(&mtcr->mtul->lock);
		ASSERT(mtcr->mtul->refcnt != 0);
		mtcr->mtul->refcnt--;
		if (list_link_active(&mtcr->mtul_queue_node))
			list_remove(&mtcr->mtul->queue, mtcr);
		mutex_exit(&mtcr->mtul->lock);
	}
	for (size_t i = 0; i < ARRAY_NUM_ELEM(mtcr->rs); i++)
		cr_destroy(mtcr, &mtcr->rs[i]);
	mtcr_gl_fini(mtcr);

	free(mtcr->init_filename);

	mutex_destroy(&mtcr->lock);
	cv_destroy(&mtcr->cv);
	cv_destroy(&mtcr->render_done_cv);

	if (mtcr->create_ctx != NULL)
		glctx_destroy(mtcr->create_ctx);

	free(mtcr);
}
690
694void
695mt_cairo_render_set_fps(mt_cairo_render_t *mtcr, double fps)
696{
697 if (mtcr->fps != fps) {
698 ASSERT(!mtcr->fg_mode);
699 mutex_enter(&mtcr->lock);
700 mtcr->fps = fps;
701 cv_broadcast(&mtcr->cv);
702 mutex_exit(&mtcr->lock);
703 }
704}
705
/*
 * @return The renderer's current target framerate. Read without taking
 *	mtcr->lock - callers get a possibly slightly stale value.
 */
double
mt_cairo_render_get_fps(mt_cairo_render_t *mtcr)
{
	return (mtcr->fps);
}
714
734void
735mt_cairo_render_enable_fg_mode(mt_cairo_render_t *mtcr)
736{
737 ASSERT(mtcr != NULL);
738 ASSERT3F(mtcr->fps, ==, 0);
739 ASSERT(mtcr->started);
740
741 if (!mtcr->fg_mode) {
742 mutex_enter(&mtcr->lock);
743 mtcr->shutdown = B_TRUE;
744 cv_broadcast(&mtcr->cv);
745 mutex_exit(&mtcr->lock);
746 thread_join(&mtcr->thr);
747
748 mtcr->fg_mode = B_TRUE;
749 mtcr->started = B_FALSE;
750 }
751}
752
/*
 * @return B_TRUE if the renderer is in foreground mode (see
 *	mt_cairo_render_enable_fg_mode), B_FALSE otherwise.
 */
bool_t
mt_cairo_render_get_fg_mode(const mt_cairo_render_t *mtcr)
{
	ASSERT(mtcr != NULL);
	return (mtcr->fg_mode);
}
764
771void
773 unsigned gl_filter_enum)
774{
775 ASSERT(mtcr != NULL);
776 ASSERT(mtcr->tex != 0);
777 glBindTexture(GL_TEXTURE_2D, mtcr->tex);
778 mtcr->filter = gl_filter_enum;
779 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gl_filter_enum);
780 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, gl_filter_enum);
781 glBindTexture(GL_TEXTURE_2D, 0);
782}
783
/*
 * Installs a shader program on the renderer. A non-zero `prog' is taken
 * as a caller-supplied (custom) program. `prog' == 0 (re)installs our
 * built-in shader, selecting GLSL 410 vs 120 variants and monochrome vs
 * color fragment shaders as appropriate. `force' forcibly rebuilds the
 * built-in shader (used when switching monochrome mode). Afterwards all
 * attribute/uniform locations and the FFP-fallback flag are refreshed.
 */
static void
set_shader_impl(mt_cairo_render_t *mtcr, unsigned prog, bool_t force)
{
	/* Forcibly reload our own shader */
	if (force && !mtcr->shader_is_custom && mtcr->shader != 0) {
		ASSERT0(prog);
		glDeleteProgram(mtcr->shader);
		mtcr->shader = 0;
	}
	if (prog != 0) {
		/* Replace our built-in shader with the custom one */
		if (!mtcr->shader_is_custom && mtcr->shader != 0)
			glDeleteProgram(mtcr->shader);
		mtcr->shader = prog;
		mtcr->shader_is_custom = B_TRUE;
	} else if (mtcr->shader_is_custom || mtcr->shader == 0) {
		/* Reinstall our standard shader */
		mtcr->shader_is_custom = B_FALSE;
		if (GLEW_VERSION_4_1) {
			const char *frag_shader_text = (IS_NULL_VECT(
			    mtcr->monochrome) ? frag_shader410 :
			    frag_shader410_mono);

			mtcr->shader = shader_prog_from_text(
			    "mt_cairo_render_shader",
			    vert_shader410, frag_shader_text, NULL);
		} else {
			const char *frag_shader_text = (IS_NULL_VECT(
			    mtcr->monochrome) ? frag_shader :
			    frag_shader_mono);

			mtcr->shader = shader_prog_from_text(
			    "mt_cairo_render_shader",
			    vert_shader, frag_shader_text, NULL);
		}
	}
	VERIFY(mtcr->shader != 0);
	/* Cache attribute & uniform locations for drawing */
	mtcr->shader_loc_vtx_pos =
	    glGetAttribLocation(mtcr->shader, "vtx_pos");
	mtcr->shader_loc_vtx_tex0 =
	    glGetAttribLocation(mtcr->shader, "vtx_tex0");
	mtcr->shader_loc_pvm = glGetUniformLocation(mtcr->shader, "pvm");
	mtcr->shader_loc_tex = glGetUniformLocation(mtcr->shader, "tex");
	mtcr->shader_loc_color_in =
	    glGetUniformLocation(mtcr->shader, "color_in");
	mtcr->use_ffp = check_use_ffp(mtcr);
}
830
/*
 * Installs a custom shader program (`prog' != 0) or restores the
 * built-in shader (`prog' == 0). See set_shader_impl for details.
 */
void
mt_cairo_render_set_shader(mt_cairo_render_t *mtcr, unsigned prog)
{
	ASSERT(mtcr != NULL);
	set_shader_impl(mtcr, prog, B_FALSE);
}
870
877unsigned
878mt_cairo_render_get_shader(mt_cairo_render_t *mtcr)
879{
880 ASSERT(mtcr != NULL);
881 return (mtcr->shader_is_custom ? mtcr->shader : 0);
882}
883
/*
 * Switches the renderer between color and monochrome (alpha-tinted)
 * operation. `color' == NULL_VECT3 selects color mode; any other value
 * selects monochrome mode with that tint. Switching modes requires a
 * full teardown & rebuild of the cairo surfaces, GL objects and the
 * built-in shader, so the worker thread is stopped and restarted.
 */
void
mt_cairo_render_set_monochrome(mt_cairo_render_t *mtcr, vect3_t color)
{
	ASSERT(mtcr != NULL);
	/*
	 * If the monochrome status hasn't changed, there's no need to
	 * rebuild the surface. Just store the potentially new color.
	 */
	if (IS_NULL_VECT(mtcr->monochrome) == IS_NULL_VECT(color)) {
		mtcr->monochrome = color;
		return;
	}
	/*
	 * Stop the worker thread.
	 */
	if (mtcr->started) {
		mutex_enter(&mtcr->lock);
		mtcr->shutdown = B_TRUE;
		cv_broadcast(&mtcr->cv);
		mutex_exit(&mtcr->lock);
		thread_join(&mtcr->thr);
	}
	/* Tear down surfaces & GL state built for the old pixel format */
	for (size_t i = 0; i < ARRAY_NUM_ELEM(mtcr->rs); i++)
		cr_destroy(mtcr, &mtcr->rs[i]);
	mtcr_gl_fini(mtcr);

	mtcr->monochrome = color;
	mtcr->render_rs = -1;
	mtcr->present_rs = -1;

	mtcr_gl_init(mtcr);
	VERIFY(cr_init(mtcr, &mtcr->rs[0]));
	/* Second surface only needed in non-coherent mode */
	if (mtcr->coherent_data == NULL)
		VERIFY(cr_init(mtcr, &mtcr->rs[1]));
	/*
	 * If we were set up to use our own shader, reload it to switch
	 * to the monochrome version.
	 */
	if (!mtcr->shader_is_custom)
		set_shader_impl(mtcr, 0, B_TRUE);
	/*
	 * Restart the worker if not in FG mode.
	 */
	if (!mtcr->fg_mode) {
		mtcr->shutdown = B_FALSE;
		VERIFY(thread_create(&mtcr->thr, worker, mtcr));
		mtcr->started = B_TRUE;
	}
}
946
952mt_cairo_render_get_monochrome(const mt_cairo_render_t *mtcr)
953{
954 ASSERT(mtcr != NULL);
955 return (mtcr->monochrome);
956}
957
/*
 * Wakes the worker thread to perform one extra render pass ASAP without
 * waiting for its completion. Must not be used in fg_mode (there is no
 * worker thread to wake).
 */
void
mt_cairo_render_once(mt_cairo_render_t *mtcr)
{
	ASSERT(mtcr != NULL);
	ASSERT0(mtcr->fg_mode);
	mutex_enter(&mtcr->lock);
	cv_broadcast(&mtcr->cv);
	mutex_exit(&mtcr->lock);
}
983
/*
 * Performs one render pass and waits for it to complete. In fg_mode the
 * rendering happens synchronously on the calling thread; otherwise the
 * worker is woken up and we block on render_done_cv until it finishes.
 */
void
mt_cairo_render_once_wait(mt_cairo_render_t *mtcr)
{
	ASSERT(mtcr != NULL);
	if (mtcr->fg_mode) {
		ASSERT(mtcr->render_cb != NULL);

		mutex_enter(&mtcr->lock);
		/* First render ever: start on surface 0 */
		if (mtcr->render_rs == -1)
			mtcr->render_rs = 0;
		worker_render_once(mtcr);
		mutex_exit(&mtcr->lock);
	} else {
		mutex_enter(&mtcr->lock);
		/* Keep the worker from looping while we wait */
		mtcr->one_shot_block = B_TRUE;
		cv_broadcast(&mtcr->cv);
		cv_wait(&mtcr->render_done_cv, &mtcr->lock);
		mtcr->one_shot_block = B_FALSE;
		mutex_exit(&mtcr->lock);
	}
}
1014
1015static void
1016mtcr_gl_formats(const mt_cairo_render_t *mtcr, GLint *intfmt, GLint *format)
1017{
1018 ASSERT(mtcr != NULL);
1019 ASSERT(intfmt != NULL);
1020 ASSERT(format != NULL);
1021
1022 if (!IS_NULL_VECT(mtcr->monochrome)) {
1023 *intfmt = GL_R8;
1024 *format = GL_RED;
1025 } else {
1026 *intfmt = GL_RGBA;
1027 *format = GL_BGRA;
1028 }
1029}
1030
1031/*
1032 * Uploads a finished cairo surface render to the provided texture & PBO.
1033 * The upload is normally done async via the PBO, but if that fails, the
1034 * upload is performed synchronously.
1035 */
1036static void
1037rs_upload(mt_cairo_render_t *mtcr, render_surf_t *rs)
1038{
1039 void *src, *dest;
1040 size_t sz;
1041
1042 if (mtcr->coherent_data != NULL)
1043 return;
1044
1045 ASSERT(mtcr != NULL);
1046 ASSERT(rs != NULL);
1047 ASSERT(rs->surf != NULL);
1048 ASSERT(mtcr->tex != 0);
1049 ASSERT(mtcr->pbo != 0);
1050
1051 sz = mtcr_get_surf_sz(mtcr);
1052 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mtcr->pbo);
1053 /* Buffer respecification - makes sure to orphan the old buffer! */
1054 glBufferData(GL_PIXEL_UNPACK_BUFFER, sz, NULL, GL_STREAM_DRAW);
1055 src = cairo_image_surface_get_data(rs->surf);
1056 dest = glMapBuffer(GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);
1057 if (dest != NULL) {
1058 memcpy(dest, src, sz);
1059 glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER);
1060 /*
1061 * We MUSTN'T call glTexImage2D yet, because if we're running
1062 * on a background uploader thread, the OpenGL renderer can
1063 * break and update the texture with old memory contents.
1064 * Don't ask me why, I have NO CLUE why the driver messes
1065 * this up. So we need to do the glTexImage2D synchronously
1066 * on the rendering thread. That seems to pick up the buffer
1067 * orphaning operation correctly.
1068 */
1069 mtcr->texed = B_FALSE;
1070 } else {
1071 GLint intfmt, format;
1072
1073 mtcr_gl_formats(mtcr, &intfmt, &format);
1074 logMsg("Error asynchronously updating mt_cairo_render "
1075 "surface %p(%s:%d): glMapBuffer returned NULL",
1076 mtcr, mtcr->init_filename, mtcr->init_line);
1077 glBindTexture(GL_TEXTURE_2D, mtcr->tex);
1078 glTexImage2D(GL_TEXTURE_2D, 0, intfmt, mtcr->w, mtcr->h, 0,
1079 format, GL_UNSIGNED_BYTE, src);
1080 mtcr->texed = B_TRUE;
1081 glBindTexture(GL_TEXTURE_2D, 0);
1082 }
1083 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1084}
1085
1086/*
1087 * After an MT-uploader async-uploads the new surface data, we still
1088 * need to apply it to the texture itself. Otherwise, it will only
1089 * sit in the orphaned buffer. This must be done from the thread
1090 * which plans to use the texture in actual rendering (otherwise the
1091 * drivers spaz out).
1092 * Careful, any texture binding point used previously is unbound by
1093 * this function. This is to facilitate interop with
1094 * mt_cairo_render_get_tex to avoid leaving bound textures lying
1095 * around.
1096 */
1097static void
1098mtcr_tex_apply(mt_cairo_render_t *mtcr, bool_t bind)
1099{
1100 ASSERT(mtcr != NULL);
1101
1102 if (!mtcr->texed) {
1103 GLint intfmt, format;
1104
1105 mtcr_gl_formats(mtcr, &intfmt, &format);
1106 ASSERT(mtcr->tex != 0);
1107 glBindTexture(GL_TEXTURE_2D, mtcr->tex);
1108 ASSERT(mtcr->pbo != 0);
1109 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, mtcr->pbo);
1110 glTexImage2D(GL_TEXTURE_2D, 0, intfmt, mtcr->w, mtcr->h, 0,
1111 format, GL_UNSIGNED_BYTE, NULL);
1112 glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
1113 mtcr->texed = B_TRUE;
1114 if (!bind)
1115 glBindTexture(GL_TEXTURE_2D, 0);
1116 } else if (bind) {
1117 glBindTexture(GL_TEXTURE_2D, mtcr->tex);
1118 }
1119}
1120
1121/*
1122 * Binds the current render_surf_t's texture to the current OpenGL context.
1123 * This is called from the foreground renderer to start drawing a finished
1124 * render frame.
1125 *
1126 * @return The render_surf_t that was bound, or NULL if none is available
1127 * for display.
1128 */
1129static bool_t
1130bind_cur_tex(mt_cairo_render_t *mtcr)
1131{
1132 ASSERT(mtcr != NULL);
1133 ASSERT_MUTEX_HELD(&mtcr->lock);
1134
1135 /* Nothing ready for present yet */
1136 if (mtcr->present_rs == -1)
1137 return (B_FALSE);
1138 glActiveTexture(GL_TEXTURE0);
1139 if (mtcr->dirty && mtcr->mtul == NULL) {
1140 rs_upload(mtcr, &mtcr->rs[mtcr->present_rs]);
1141 mtcr->dirty = B_FALSE;
1142 }
1143 /* NOW we can safely update the texture */
1144 mtcr_tex_apply(mtcr, B_TRUE);
1145
1146 return (B_TRUE);
1147}
1148
1149static void
1150prepare_vtx_buffer(mt_cairo_render_t *mtcr, vect2_t pos, vect2_t size,
1151 double x1, double x2, double y1, double y2, vtx_t buf[4])
1152{
1153 ASSERT(mtcr != NULL);
1154 ASSERT(buf != NULL);
1155
1156 if (VECT2_EQ(mtcr->last_draw.pos, pos) &&
1157 VECT2_EQ(mtcr->last_draw.size, size) &&
1158 mtcr->last_draw.x1 == x1 && mtcr->last_draw.x2 == x2 &&
1159 mtcr->last_draw.y1 == y1 && mtcr->last_draw.y2 == y2 &&
1160 !mtcr->use_ffp) {
1161 return;
1162 }
1163 buf[0].pos[0] = pos.x;
1164 buf[0].pos[1] = pos.y;
1165 buf[0].pos[2] = 0;
1166 buf[0].tex0[0] = x1;
1167 buf[0].tex0[1] = y2;
1168
1169 buf[1].pos[0] = pos.x;
1170 buf[1].pos[1] = pos.y + size.y;
1171 buf[1].pos[2] = 0;
1172 buf[1].tex0[0] = x1;
1173 buf[1].tex0[1] = y1;
1174
1175 buf[2].pos[0] = pos.x + size.x;
1176 buf[2].pos[1] = pos.y + size.y;
1177 buf[2].pos[2] = 0;
1178 buf[2].tex0[0] = x2;
1179 buf[2].tex0[1] = y1;
1180
1181 buf[3].pos[0] = pos.x + size.x;
1182 buf[3].pos[1] = pos.y;
1183 buf[3].pos[2] = 0;
1184 buf[3].tex0[0] = x2;
1185 buf[3].tex0[1] = y2;
1186
1187 if (!mtcr->use_ffp) {
1188 ASSERT(mtcr->vtx_buf != 0);
1189 glBindBuffer(GL_ARRAY_BUFFER, mtcr->vtx_buf);
1190 glBufferData(GL_ARRAY_BUFFER, 4 * sizeof (vtx_t), buf,
1191 GL_STATIC_DRAW);
1192 } else {
1193 glEnableClientState(GL_VERTEX_ARRAY);
1194 glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1195 glVertexPointer(3, GL_FLOAT, sizeof(vtx_t),
1196 ((void *)buf) + offsetof(vtx_t, pos));
1197 glTexCoordPointer(2, GL_FLOAT, sizeof(vtx_t),
1198 ((void *)buf) + offsetof(vtx_t, tex0));
1199 }
1200 mtcr->last_draw.pos = pos;
1201 mtcr->last_draw.size = size;
1202 mtcr->last_draw.x1 = x1;
1203 mtcr->last_draw.x2 = x2;
1204 mtcr->last_draw.y1 = y1;
1205 mtcr->last_draw.y2 = y2;
1206}
1207
/*
 * Draws the renderer's output at `pos' sized `size' using the entire
 * texture (sub-rectangle (0,0)-(1,1)).
 */
void
mt_cairo_render_draw(mt_cairo_render_t *mtcr, vect2_t pos, vect2_t size)
{
	mt_cairo_render_draw_subrect(mtcr, ZERO_VECT2, VECT2(1, 1), pos, size);
}
1219
1224void
1225mt_cairo_render_draw_pvm(mt_cairo_render_t *mtcr, vect2_t pos, vect2_t size,
1226 const float *pvm)
1227{
1229 size, pvm);
1230}
1231
/*
 * Draws a sub-rectangle (`src_pos'/`src_sz' in normalized 0..1 texture
 * space) of the renderer's output at screen position `pos' sized `size',
 * constructing a suitable projection*modelview matrix from X-Plane's
 * datarefs and delegating to mt_cairo_render_draw_subrect_pvm.
 */
void
mt_cairo_render_draw_subrect(mt_cairo_render_t *mtcr,
    vect2_t src_pos, vect2_t src_sz, vect2_t pos, vect2_t size)
{
	mat4 pvm;
	/*
	 * Until X-Plane 12.06, XP wrongly reported `sim/view/draw_call_type`
	 * as `0` in window callbacks. 12.06 fixes that, but the viewport
	 * method doesn't work for Windows (the viewport is always reported
	 * in pixels, whereas the OGL matrices that XP provides as datarefs
	 * take interface scaling into account.) On XP > 12.06, we always
	 * use the matrices rather than the viewport to decide how to draw.
	 */
	int xp_version;
	XPLMGetVersions(&xp_version, NULL, NULL);

	if (xp_version < 12060 && dr_geti(&drs.draw_call_type) != 0) {
		/* Pre-12.06, non-window draw call: ortho-project viewport */
		int vp[4];

		VERIFY3S(dr_getvi(&drs.viewport, vp, 0, 4), ==, 4);
		ASSERT3S(vp[2] - vp[0], >, 0);
		ASSERT3S(vp[3] - vp[1], >, 0);
		glm_ortho(vp[0], vp[2], vp[1], vp[3], 0, 1, pvm);
	} else {
		/* Use X-Plane's own projection & modelview matrices */
		mat4 proj, mv;

		VERIFY3S(dr_getvf32(&drs.mv_matrix, (float *)mv, 0, 16),
		    ==, 16);
		VERIFY3S(dr_getvf32(&drs.proj_matrix, (float *)proj, 0, 16),
		    ==, 16);
		glm_mat4_mul(proj, mv, pvm);
	}
	mt_cairo_render_draw_subrect_pvm(mtcr, src_pos, src_sz, pos, size,
	    (float *)pvm);
}
1271
/*
 * Core draw routine: draws sub-rectangle src_pos/src_sz (normalized 0..1
 * texture coordinates) of the current rendered frame at pos/size, using
 * the caller-supplied projection-view matrix `pvm'.  Three draw paths
 * exist: VAO (modern GL), fixed-function pipeline (use_ffp), and plain
 * vertex-attrib pointers; the path used determines both setup and the
 * state cleanup performed at the end.
 */
1302void
1303mt_cairo_render_draw_subrect_pvm(mt_cairo_render_t *mtcr,
1304 vect2_t src_pos, vect2_t src_sz, vect2_t pos, vect2_t size,
1305 const float *pvm)
1306{
1307 GLint old_vao = 0;
1308 bool_t use_vao;
1309 double x1 = src_pos.x, x2 = src_pos.x + src_sz.x;
1310 double y1 = src_pos.y, y2 = src_pos.y + src_sz.y;
1311 bool cull_front = false;
1312 vtx_t vtx_buf[4] = {};
1313
 /* If no frame is available to bind, there is nothing to draw. */
1314 mutex_enter(&mtcr->lock);
1315 if (!bind_cur_tex(mtcr)) {
1316 mutex_exit(&mtcr->lock);
1317 return;
1318 }
1319 mutex_exit(&mtcr->lock);
1320
 /*
  * Only use the VAO when ctx-checking is disabled, or when we can
  * confirm we are on the same GL context that created the VAO
  * (VAOs are not shared between contexts).
  */
1321 use_vao = (mtcr->vao != 0 && (!mtcr->ctx_checking ||
1322 (mtcr->create_ctx != NULL && glctx_is_current(mtcr->create_ctx))));
1323
1324 if (use_vao) {
 /* Save the caller's VAO binding so we can restore it below. */
1325 glGetIntegerv(GL_VERTEX_ARRAY_BINDING, &old_vao);
1326
1327 glBindVertexArray(mtcr->vao);
1328 glEnable(GL_BLEND);
1329 } else if (mtcr->use_ffp) {
1330 XPLMSetGraphicsState(0, 1, 0, 1, 1, 0, 0);
1331 glMatrixMode(GL_PROJECTION);
1332 glPushMatrix();
1333 glLoadMatrixf(pvm);
1334 glMatrixMode(GL_MODELVIEW);
1335 glPushMatrix();
1336 glLoadIdentity();
1337 glUseProgram(0);
1338
1339 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mtcr->idx_buf);
1340 } else {
1341#if APL
1342 /*
1343 * Leaving this on on MacOS breaks glDrawElements
1344 * and makes it perform horribly.
1345 */
1346 glDisableClientState(GL_VERTEX_ARRAY);
1347#endif /* APL */
1348 XPLMSetGraphicsState(1, 1, 1, 1, 1, 1, 1);
1349 glBindBuffer(GL_ARRAY_BUFFER, mtcr->vtx_buf);
1350 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mtcr->idx_buf);
1351
1352 glutils_enable_vtx_attr_ptr(mtcr->shader_loc_vtx_pos, 3,
1353 GL_FLOAT, GL_FALSE, sizeof (vtx_t), offsetof(vtx_t, pos));
1354 glutils_enable_vtx_attr_ptr(mtcr->shader_loc_vtx_tex0, 2,
1355 GL_FLOAT, GL_FALSE, sizeof (vtx_t), offsetof(vtx_t, tex0));
1356 }
 /* Refreshes the quad geometry if pos/size/src rect changed. */
1357 prepare_vtx_buffer(mtcr, pos, size, x1, x2, y1, y2, vtx_buf);
1358 if (!mtcr->use_ffp) {
1359 ASSERT(mtcr->shader != 0);
1360 glUseProgram(mtcr->shader);
1361
1362 glUniformMatrix4fv(mtcr->shader_loc_pvm,
1363 1, GL_FALSE, (const float *)pvm);
1364 glUniform1i(mtcr->shader_loc_tex, 0);
 /* Optional monochrome tint (see mt_cairo_render_set_monochrome). */
1365 if (!IS_NULL_VECT(mtcr->monochrome)) {
1366 glUniform3f(mtcr->shader_loc_color_in,
1367 mtcr->monochrome.x, mtcr->monochrome.y,
1368 mtcr->monochrome.z);
1369 }
1370 }
 /*
  * A negative size on exactly one axis mirrors the quad, flipping its
  * winding order, so temporarily cull front faces instead of back.
  */
1371 if ((size.x < 0 && size.y >= 0) || (size.x >= 0 && size.y < 0)) {
1372 glCullFace(GL_FRONT);
1373 cull_front = true;
1374 }
1375 glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, NULL);
1376
1377 /*
1378 * State cleanup
1379 */
1380 if (cull_front)
1381 glCullFace(GL_BACK);
1382
1383 if (use_vao) {
1384 glBindVertexArray(old_vao);
1385 } else if (mtcr->use_ffp) {
1386 glPopMatrix();
1387 glMatrixMode(GL_PROJECTION);
1388 glPopMatrix();
1389 glDisableClientState(GL_VERTEX_ARRAY);
1390 glDisableClientState(GL_COLOR_ARRAY);
1391 XPLMBindTexture2d(0, 0);
1392 } else {
1393 glutils_disable_vtx_attr_ptr(mtcr->shader_loc_vtx_pos);
1394 glutils_disable_vtx_attr_ptr(mtcr->shader_loc_vtx_tex0);
1395 /*
1396 * X-Plane needs to know that we have unbound the texture
1397 * previously bound in slot #0. Otherwise we can cause
1398 * glitchy window rendering.
1399 */
1400 XPLMBindTexture2d(0, 0);
1401 }
1402 glBindBuffer(GL_ARRAY_BUFFER, 0);
1403 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
1404 glUseProgram(0);
1405}
1406
1415void
1416mt_cairo_render_set_uploader(mt_cairo_render_t *mtcr, mt_cairo_uploader_t *mtul)
1417{
1418 mt_cairo_uploader_t *mtul_old;
1419
1420 ASSERT(mtcr != NULL);
1421
1422 if (mtul == mtcr->mtul || coherent)
1423 return;
1424
1425 mtul_old = mtcr->mtul;
1426 if (mtul_old != NULL) {
1427 mutex_enter(&mtul_old->lock);
1428 ASSERT(mtul_old->refcnt != 0);
1429 mtul_old->refcnt--;
1430 if (list_link_active(&mtcr->mtul_queue_node))
1431 list_remove(&mtul_old->queue, mtcr);
1432 mutex_exit(&mtul_old->lock);
1433 }
1434
1435 mutex_enter(&mtcr->lock);
1436 mtcr->mtul = mtul;
1437 mutex_exit(&mtcr->lock);
1438
1439 /*
1440 * Because now the main rendering thread will no longer cause us
1441 * to upload in-sync, in case we have a pending frame, immediately
1442 * add us to the MTUL.
1443 */
1444 if (mtul != NULL) {
1445 mutex_enter(&mtul->lock);
1446 mtul->refcnt++;
1447 if (!list_link_active(&mtcr->mtul_queue_node)) {
1448 list_insert_tail(&mtul->queue, mtcr);
1449 cv_broadcast(&mtcr->mtul->cv_queue);
1450 }
1451 mutex_exit(&mtul->lock);
1452 }
1453}
1454
1459mt_cairo_uploader_t *
1460mt_cairo_render_get_uploader(mt_cairo_render_t *mtcr)
1461{
1462 mt_cairo_uploader_t *mtul;
1463
1464 ASSERT(mtcr != NULL);
1465 mutex_enter(&mtcr->lock);
1466 mtul = mtcr->mtul;
1467 mutex_exit(&mtcr->lock);
1468
1469 return (mtul);
1470}
1471
1476GLuint
1477mt_cairo_render_get_tex(mt_cairo_render_t *mtcr)
1478{
1479 GLuint tex;
1480
1481 mutex_enter(&mtcr->lock);
1482
1483 if (mtcr->present_rs != -1) {
1484 /* Upload & apply the texture if it has changed */
1485 if (mtcr->dirty && mtcr->mtul == NULL) {
1486 rs_upload(mtcr, &mtcr->rs[!mtcr->present_rs]);
1487 mtcr->dirty = B_FALSE;
1488 }
1489 mtcr_tex_apply(mtcr, B_FALSE);
1490 tex = mtcr->tex;
1491 } else {
1492 /* No texture ready yet */
1493 tex = 0;
1494 }
1495
1496 mutex_exit(&mtcr->lock);
1497
1498 return (tex);
1499}
1500
1505unsigned
1506mt_cairo_render_get_width(mt_cairo_render_t *mtcr)
1507{
1508 ASSERT(mtcr != NULL);
1509 return (mtcr->w);
1510}
1511
1516unsigned
1517mt_cairo_render_get_height(mt_cairo_render_t *mtcr)
1518{
1519 ASSERT(mtcr != NULL);
1520 return (mtcr->h);
1521}
1522
1523void
1524mt_cairo_render_blit_back2front(mt_cairo_render_t *mtcr,
1525 const mtcr_rect_t *rects, size_t num)
1526{
1527 LACF_UNUSED(mtcr);
1528 LACF_UNUSED(rects);
1529 LACF_UNUSED(num);
1530}
1531
1538void
1539mt_cairo_render_set_ctx_checking_enabled(mt_cairo_render_t *mtcr,
1540 bool_t flag)
1541{
1542 mtcr->ctx_checking = flag;
1543}
1544
/*
 * Uploader-thread side: starts an asynchronous upload of mtcr's freshly
 * rendered back surface.  If an upload was actually begun, a GL fence
 * is installed and the mtcr is appended to `ul_inprog_list' so the
 * caller can later complete it via mtul_try_complete_ul().
 */
1545static void
1546mtul_upload(mt_cairo_render_t *mtcr, list_t *ul_inprog_list)
1547{
1548 ASSERT(mtcr != NULL);
1549 ASSERT(ul_inprog_list != NULL);
1550
1551 mutex_enter(&mtcr->lock);
1552
1553 ASSERT(!coherent);
1554 if (mtcr->render_rs == -1) {
1555 /*
1556 * No frame ready, this happens if we got added to the
1557 * uploader's work queue in mt_cairo_render_set_uploader.
1558 */
1559 mutex_exit(&mtcr->lock);
1560 return;
1561 }
1562 if (mtcr->dirty) {
1563 render_surf_t *rs = &mtcr->rs[mtcr->render_rs];
1564 rs_upload(mtcr, rs);
 /*
  * The fence lets us poll for upload completion later without
  * blocking the uploader thread on this one surface.
  */
1565 ASSERT3P(mtcr->sync, ==, NULL);
1566 mtcr->sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
1567 ASSERT(!list_link_active(&mtcr->ul_inprog_node));
1568 list_insert_tail(ul_inprog_list, mtcr);
1569 }
1570 mutex_exit(&mtcr->lock);
1571}
1572
/*
 * Attempts to finish an upload previously started by mtul_upload().
 * Returns B_TRUE when the GPU signaled the fence (the mtcr is then
 * removed from `ul_inprog_list' and marked clean & presentable), or
 * B_FALSE if the fence didn't signal within UL_TIMEOUT nanoseconds.
 */
1573static bool_t
1574mtul_try_complete_ul(mt_cairo_render_t *mtcr, list_t *ul_inprog_list)
1575{
1576 enum { UL_TIMEOUT = 500000 /* ns */ };
1577
1578 ASSERT(mtcr != NULL);
1579 ASSERT(mtcr->sync != NULL);
1580 ASSERT(ul_inprog_list != NULL);
1581
 /* Not done yet - caller will retry on a later pass. */
1582 if (glClientWaitSync(mtcr->sync, GL_SYNC_FLUSH_COMMANDS_BIT,
1583 UL_TIMEOUT) == GL_TIMEOUT_EXPIRED) {
1584 return (B_FALSE);
1585 }
1586 /*
1587 * We need to remove the surface from the ul_inprog_list BEFORE
1588 * resetting rs->dirty, otherwise the mtcr could attempt to emit
1589 * another frame. This could try to double-add the surface while
1590 * it's still active on the ul_inprog_list.
1591 */
1592 list_remove(ul_inprog_list, mtcr);
1593 ASSERT(!coherent);
1594
1595 mutex_enter(&mtcr->lock);
1596
1597 glDeleteSync(mtcr->sync);
1598 mtcr->sync = NULL;
1599 ASSERT(mtcr->dirty);
1600 mtcr->dirty = B_FALSE;
1601 mtcr->texed = B_FALSE;
 /* Flip: the just-uploaded back surface becomes the presented one. */
1602 mtcr->present_rs = mtcr->render_rs;
 /* Wake any thread waiting for this frame (e.g. render-once waiters). */
1603 cv_broadcast(&mtcr->render_done_cv);
1604 mutex_exit(&mtcr->lock);
1605
1606 return (B_TRUE);
1607}
1608
/*
 * Core of the uploader worker loop: pulls renderers off the uploader's
 * work queue, starts their uploads and completes them as their fences
 * signal.  Called (and returns) with mtul->lock held; the lock is
 * dropped around the actual GL work so producers aren't stalled.
 * Doesn't return until every in-progress upload has completed.
 */
1609static void
1610mtul_drain_queue(mt_cairo_uploader_t *mtul)
1611{
1612 list_t ul_inprog_list;
1613
1614 ASSERT(mtul != NULL);
1615 ASSERT_MUTEX_HELD(&mtul->lock);
1616
 /* Thread-local list of uploads started but not yet fence-complete. */
1617 list_create(&ul_inprog_list, sizeof (mt_cairo_render_t),
1618 offsetof(mt_cairo_render_t, ul_inprog_node));
1619
1620 do {
1621 mt_cairo_render_t *mtcr;
1622 /*
1623 * Dequeue new work assignments and start the upload.
1624 */
1625 while ((mtcr = list_remove_head(&mtul->queue)) != NULL) {
1626 mutex_exit(&mtul->lock);
1627 mtul_upload(mtcr, &ul_inprog_list);
1628 mutex_enter(&mtul->lock);
1629 }
1630 /*
1631 * No more uploads pending for start. Now see if we can
1632 * complete an upload.
1633 */
1634 mtcr = list_head(&ul_inprog_list);
1635 if (mtcr != NULL) {
1636 bool_t ul_done;
1637
1638 mutex_exit(&mtul->lock);
1639 ul_done = mtul_try_complete_ul(mtcr, &ul_inprog_list);
1640 mutex_enter(&mtul->lock);
1641 if (ul_done) {
1642 /*
1643 * The rs has already been removed from
1644 * the ul_inprog_list.
1645 */
1646 cv_broadcast(&mtul->cv_done);
1647 }
1648 }
1649 } while (list_count(&ul_inprog_list) != 0);
1650
1651 list_destroy(&ul_inprog_list);
1652}
1653
1654/*
1655 * Actual upload worker thread main function.
1656 *
1657 * There is some nonsense in the way Nvidia synchronizes texture resources
1658 * between multiple shared OpenGL contexts which means we could get flickering
1659 * of the old render if we simply pushed new pixels into the PBO & texture
1660 * that was being used by the foreground renderer.
1661 *
1662 * To avoid this, we generate a COMPLETELY new texture + PBO set for the
1663 * upload. When these have completed uploading, we do an atomic texture & PBO
1664 * swap in the render surface. At the same time, the foreground renderer
1665 * is isolated from this by taking full ownership of the textures while it
1666 * is using them to render.
1667 */
1668static void
1669mtul_worker(void *arg)
1670{
1671 mt_cairo_uploader_t *mtul;
1672
1673 thread_set_name("mtul_worker");
1674
1675 ASSERT(arg != NULL);
1676 mtul = arg;
1677
 /* Bind our private shared GL context and initialize GLEW on it. */
1678 ASSERT(mtul->ctx != NULL);
1679 VERIFY(glctx_make_current(mtul->ctx));
1680 VERIFY3U(glewInit(), ==, GLEW_OK);
1681
1682 mutex_enter(&mtul->lock);
1683
 /* Run until mt_cairo_uploader_fini() sets the shutdown flag. */
1684 while (!mtul->shutdown) {
1685 mtul_drain_queue(mtul);
1686 /* pause for more work */
1687 if (list_head(&mtul->queue) == NULL)
1688 cv_wait(&mtul->cv_queue, &mtul->lock);
1689 }
1690
1691 mutex_exit(&mtul->lock);
1692
 /*
  * NOTE(review): source line 1693 appears elided in this extract
  * (presumably releasing the GL context before thread exit) --
  * confirm against the upstream file.
  */
1694}
1695
/*
 * Creates a new asynchronous texture uploader (prototype: takes no
 * arguments, per the declaration of mt_cairo_uploader_init()).  In
 * coherent-memory mode only a stub object is returned.  Otherwise a
 * private GL context shared with the caller's current context is
 * created and a worker thread started.  Returns NULL if the shared
 * context could not be created.
 *
 * NOTE(review): source lines 1749, 1753-1754 and 1765 appear elided in
 * this extract; line 1765 is presumably the glctx_create_invisible()
 * call assigning mtul->ctx (its trailing arguments are visible on the
 * next line) -- confirm against the upstream file.
 */
1748mt_cairo_uploader_t *
1750{
1751 mt_cairo_uploader_t *mtul = safe_calloc(1, sizeof (*mtul));
1752 glctx_t *ctx_main;
1755 if (coherent) {
1756 /*
1757 * In coherent mode, just create a stub uploader and return.
1758 * Don't actually do any uploading.
1759 */
1760 return (mtul);
1761 }
1762 ctx_main = glctx_get_current();
1763 ASSERT(ctx_main != NULL);
1764
1766 ctx_main, 2, 1, B_FALSE, B_FALSE);
 /* Only the wrapper handle is destroyed, not the underlying context. */
1767 glctx_destroy(ctx_main);
1768 if (mtul->ctx == NULL) {
1769 free(mtul);
1770 return (NULL);
1771 }
1772 mutex_init(&mtul->lock);
1773 cv_init(&mtul->cv_queue);
1774 cv_init(&mtul->cv_done);
1775 list_create(&mtul->queue, sizeof (mt_cairo_render_t),
1776 offsetof(mt_cairo_render_t, mtul_queue_node));
1777
1778 VERIFY(thread_create(&mtul->worker, mtul_worker, mtul));
1779
1780 return (mtul);
1781}
1782
/*
 * Destroys an uploader previously created by mt_cairo_uploader_init().
 * All renderers must have been detached first (refcnt and queue must
 * both be zero/empty).  Shutdown order: raise the shutdown flag under
 * the lock, wake the worker, join it, then tear down GL context,
 * queue, lock and condition variables.
 */
1789void
1790mt_cairo_uploader_fini(mt_cairo_uploader_t *mtul)
1791{
1792 ASSERT(mtul != NULL);
1793
 /* Coherent-mode uploaders are stubs with nothing to tear down. */
1794 if (coherent) {
1795 ZERO_FREE(mtul);
1796 return;
1797 }
1798 ASSERT0(mtul->refcnt);
1799 ASSERT0(list_count(&mtul->queue));
1800
1801 mutex_enter(&mtul->lock);
1802 mtul->shutdown = B_TRUE;
1803 cv_broadcast(&mtul->cv_queue);
1804 mutex_exit(&mtul->lock);
1805 thread_join(&mtul->worker);
1806
1807 ASSERT(mtul->ctx != NULL);
1808 glctx_destroy(mtul->ctx);
1809 list_destroy(&mtul->queue);
1810 mutex_destroy(&mtul->lock);
1811 cv_destroy(&mtul->cv_queue);
1812 cv_destroy(&mtul->cv_done);
1813
 /*
  * NOTE(review): the memset looks redundant if ZERO_FREE (by name)
  * already zeroes before freeing -- harmless belt-and-braces;
  * confirm against safe_alloc.h before removing.
  */
1814 memset(mtul, 0, sizeof (*mtul));
1815 ZERO_FREE(mtul);
1816}
#define ASSERT3P(x, op, y)
Definition assert.h:212
#define VERIFY(x)
Definition assert.h:78
#define ASSERT3S(x, op, y)
Definition assert.h:209
#define ASSERT(x)
Definition assert.h:208
#define ASSERT3F(x, op, y)
Definition assert.h:211
#define ASSERT0(x)
Definition assert.h:213
#define VERIFY3S(x, op, y)
Definition assert.h:125
#define VERIFY3U(x, op, y)
Definition assert.h:136
#define ARRAY_NUM_ELEM(_array)
Definition core.h:171
#define dr_getvf32(__dr, __ff, __off, __num)
Definition dr.h:473
#define fdr_find(dr,...)
Definition dr.h:318
#define dr_geti(__dr)
Definition dr.h:348
#define dr_getvi(__dr, __i, __off, __num)
Definition dr.h:426
#define NULL_VECT3
Definition geom.h:216
#define VECT2_EQ(a, b)
Definition geom.h:196
#define IS_NULL_VECT(a)
Definition geom.h:228
#define NULL_VECT2
Definition geom.h:214
#define ZERO_VECT2
Definition geom.h:208
#define VECT2(x, y)
Definition geom.h:190
API_EXPORT glctx_t * glctx_get_current(void)
Definition glctx.c:370
API_EXPORT void glctx_destroy(glctx_t *ctx)
Definition glctx.c:514
API_EXPORT void * glctx_get_xplane_win_ptr(void)
Definition glctx.c:361
API_EXPORT bool_t glctx_is_current(glctx_t *ctx)
Definition glctx.c:415
API_EXPORT glctx_t * glctx_create_invisible(void *win_ptr, glctx_t *share_ctx, int major_ver, int minor_ver, bool_t fwd_compat, bool_t debug)
Definition glctx.c:314
API_EXPORT bool_t glctx_make_current(glctx_t *ctx)
Definition glctx.c:440
static void glutils_disable_vtx_attr_ptr(GLint index)
Definition glutils.h:649
#define TEXSZ_FREE_INSTANCE(__token_id, __instance, __format, __type, __w, __h)
Definition glutils.h:373
#define TEXSZ_MK_TOKEN(name)
Definition glutils.h:296
#define TEXSZ_ALLOC_INSTANCE(__token_id, __instance, __filename, __line, __format, __type, __w, __h)
Definition glutils.h:361
#define IF_TEXSZ(__xxx)
Definition glutils.h:434
GLuint glutils_make_quads_IBO(size_t num_vtx)
Definition glutils.c:176
static void glutils_enable_vtx_attr_ptr(GLint index, GLint size, GLenum type, GLboolean normalized, size_t stride, size_t offset)
Definition glutils.h:631
bool_t glutils_in_zink_mode(void)
Definition glutils.c:1446
int list_link_active(const list_node_t *)
Definition list.c:525
void list_destroy(list_t *)
Definition list.c:136
void * list_head(const list_t *)
Definition list.c:292
void list_create(list_t *, size_t, size_t)
Definition list.c:113
size_t list_count(const list_t *)
Definition list.c:543
void list_remove(list_t *, void *)
Definition list.c:226
void * list_remove_head(list_t *)
Definition list.c:251
void list_insert_tail(list_t *, void *)
Definition list.c:213
#define logMsg(...)
Definition log.h:112
bool_t(* mt_cairo_init_cb_t)(cairo_t *cr, void *userinfo)
void mt_cairo_render_once_wait(mt_cairo_render_t *mtcr)
vect3_t mt_cairo_render_get_monochrome(const mt_cairo_render_t *mtcr)
unsigned mt_cairo_render_get_shader(mt_cairo_render_t *mtcr)
void mt_cairo_render_set_texture_filter(mt_cairo_render_t *mtcr, unsigned gl_filter_enum)
void mt_cairo_render_draw_subrect(mt_cairo_render_t *mtcr, vect2_t src_pos, vect2_t src_sz, vect2_t pos, vect2_t size)
unsigned mt_cairo_render_get_tex(mt_cairo_render_t *mtcr)
void mt_cairo_render_fini(mt_cairo_render_t *mtcr)
unsigned mt_cairo_render_get_width(mt_cairo_render_t *mtcr)
void mt_cairo_render_glob_init(bool_t want_coherent_mem)
void mt_cairo_render_draw_pvm(mt_cairo_render_t *mtcr, vect2_t pos, vect2_t size, const float *pvm)
void mt_cairo_render_set_shader(mt_cairo_render_t *mtcr, unsigned prog)
mt_cairo_render_t * mt_cairo_render_init_impl(const char *filename, int line, unsigned w, unsigned h, double fps, mt_cairo_init_cb_t init_cb, mt_cairo_render_cb_t render_cb, mt_cairo_fini_cb_t fini_cb, void *userinfo)
double mt_cairo_render_get_fps(mt_cairo_render_t *mtcr)
void(* mt_cairo_fini_cb_t)(cairo_t *cr, void *userinfo)
mt_cairo_uploader_t * mt_cairo_render_get_uploader(mt_cairo_render_t *mtcr)
mt_cairo_uploader_t * mt_cairo_uploader_init(void)
void mt_cairo_render_draw(mt_cairo_render_t *mtcr, vect2_t pos, vect2_t size)
void mt_cairo_uploader_fini(mt_cairo_uploader_t *mtul)
void mt_cairo_render_set_uploader(mt_cairo_render_t *mtcr, mt_cairo_uploader_t *mtul)
unsigned mt_cairo_render_get_height(mt_cairo_render_t *mtcr)
void(* mt_cairo_render_cb_t)(cairo_t *cr, unsigned w, unsigned h, void *userinfo)
void mt_cairo_render_set_monochrome(mt_cairo_render_t *mtcr, vect3_t color)
bool_t mt_cairo_render_get_fg_mode(const mt_cairo_render_t *mtcr)
void mt_cairo_render_enable_fg_mode(mt_cairo_render_t *mtcr)
void mt_cairo_render_set_fps(mt_cairo_render_t *mtcr, double fps)
void mt_cairo_render_draw_subrect_pvm(mt_cairo_render_t *mtcr, vect2_t src_pos, vect2_t src_sz, vect2_t pos, vect2_t size, const float *pvm)
void mt_cairo_render_once(mt_cairo_render_t *mtcr)
#define ZERO_FREE(ptr)
Definition safe_alloc.h:253
static void * safe_calloc(size_t nmemb, size_t size)
Definition safe_alloc.h:71
Definition geom.h:89
CONDITION_VARIABLE condvar_t
Definition thread.h:465
static void cv_destroy(condvar_t *cv)
Definition thread.h:936
#define ASSERT_MUTEX_HELD(mtx)
Definition thread.h:1228
static void thread_set_name(const char *name)
Definition thread.h:852
static void mutex_destroy(mutex_t *mtx)
Definition thread.h:499
static void thread_join(thread_t *thrp)
Definition thread.h:836
static void mutex_enter(mutex_t *mtx)
Definition thread.h:530
static void cv_init(condvar_t *cv)
Definition thread.h:926
#define curthread_id
Definition thread.h:467
static void mutex_exit(mutex_t *mtx)
Definition thread.h:556
DWORD thread_id_t
Definition thread.h:401
HANDLE thread_t
Definition thread.h:393
static void cv_wait(condvar_t *cv, mutex_t *mtx)
Definition thread.h:868
static int cv_timedwait(condvar_t *cv, mutex_t *mtx, uint64_t limit)
Definition thread.h:898
static void mutex_init(mutex_t *mtx)
Definition thread.h:488
static void cv_broadcast(condvar_t *cv)
Definition thread.h:960
#define thread_create(thrp, start_proc, arg)
Definition thread.h:189