/*
------------------------------------------------------------------------------
Licensing information can be found at the end of the file.
------------------------------------------------------------------------------
cute_spritebatch.h - v1.04
To create implementation (the function definitions)
#define SPRITEBATCH_IMPLEMENTATION
in *one* C/CPP file (translation unit) that includes this file
SUMMARY:
This header implements a 2D sprite batcher by tracking different textures within
a rolling atlas cache. Over time atlases are decayed and recreated when textures
stop being used. This header is useful for batching sprites at run-time. This avoids
the need to compile texture atlases as a pre-process step, letting the game load
images up individually, dramatically simplifying art pipelines.
MORE DETAILS:
`spritebatch_push` is used to push sprite instances into a buffer. Rendering sprites
works by calling `spritebatch_flush`. `spritebatch_flush` will use a user-supplied
callback to report sprite batches. This callback is of type `submit_batch_fn`. The
batches are reported as an array of `spritebatch_sprite_t` sprites, and can be
further sorted by the user (for example to sort by depth). Sprites in a batch share
the same texture handle (either from the same base image, or from the same internal
atlas).
cute_spritebatch does not know anything about how to generate texture handles, or
destroy them. As such, the user must supply two callbacks for creating handles and
destroying them. These can be simple wrappers around, for example, `glGenTextures`
and `glDeleteTextures`.
Finally, cute_spritebatch will periodically need access to pixels from images. These
pixels are used to generate textures, or to build atlases (which in turn generate a
texture). cute_spritebatch does not need to know much about your images, other than
the pixel stride. The user supplies callback of type `get_pixels_fn`, which lets
cute_spritebatch retrieve the pixels associated with a particular image. The pixels
can be stored in RAM and handed to cute_spritebatch whenever requested, or the pixels
can be fetched directly from disk and handed to cute_spritebatch. It doesn't matter
to cute_spritebatch. Since `get_pixels_fn` can be called from `spritebatch_flush` it
is recommended to avoid file i/o within the `get_pixels_fn` callback, and instead try
to already have pixels ready in RAM.
The `spritebatch_defrag` function performs atlas creation and texture management. It
should be called periodically. It can be called once per game tick (once per render),
or optionally called at a different frequency (once every N game ticks).
PROS AND CONS:
PROS
- Texture atlases are completely hidden behind an api. The api in this header can
easily be implemented with different backend sprite batchers. For example on
some platforms bindless textures can be utilized in order to avoid texture
atlases entirely! Code using this API can have the backend implementation swapped
without requiring any user code to change.
- Sprites are batched in an effective manner to dramatically reduce draw call counts.
- Supporting hotswapping or live-reloading of images can be trivialized due to
moving atlas creation out of the art-pipeline and into the run-time.
- Since atlases are built at run-time and continually maintained, images are
guaranteed to be drawn at the same time on-screen as their atlas neighbors. This is
typically not the case for atlas preprocessors, as a *guess* must be made to try
and organize images together in atlases that need to be drawn at roughly the same
time.
CONS
- Performance hits in the `spritebatch_defrag` function, and a little as well in
the `spritebatch_flush` function. Extra run-time memory usage for bookkeeping,
which implies a RAM hit as well as more things to clog the CPU cache.
- If each texture comes from a separate image on-disk, opening individual files on
disk can be very slow. For example on Windows just performing permissions and
related work to open a file is time-consuming. This can be mitigated by moving
assets into a single larger file, for example a .zip archive read through a file io
abstraction like PHYSFS.
- For large numbers of separate images, some file abstraction is necessary to avoid
a large performance hit on opening/closing many individual files. This problem is
*not* solved by cute_spritebatch.h, and instead should be solved by some separate
file abstraction system. PHYSFS is a good example of a solid file io abstraction.
EXAMPLE USAGE:
spritebatch_config_t config;
spritebatch_set_default_config(&config);
config.batch_callback = my_report_batches_function;
config.get_pixels_callback = my_get_pixels_function;
config.generate_texture_callback = my_make_texture_handle_function;
config.delete_texture_callback = my_destroy_texture_handle_function;
spritebatch_t batcher;
spritebatch_init(&batcher, &config);
while (game_is_running)
{
for (int i = 0; i < sprite_count; ++i)
spritebatch_push(
&batcher,
sprites[i].image_id,
sprites[i].image_width_in_pixels,
sprites[i].image_height_in_pixels,
sprites[i].position_x,
sprites[i].position_y,
sprites[i].scale_x,
sprites[i].scale_y,
sprites[i].cos_rotation_angle,
sprites[i].sin_rotation_angle
);
spritebatch_tick(&batcher);
spritebatch_defrag(&batcher);
spritebatch_flush(&batcher);
}
CUSTOMIZATION:
The following macros can be defined before including this header with the
SPRITEBATCH_IMPLEMENTATION symbol defined, in order to customize the internal
behavior of cute_spritebatch.h. Search this header to find how each macro is
defined and used. Note that the MALLOC/FREE functions can optionally take a context
parameter for custom allocation (a short sketch follows the list below).
SPRITEBATCH_MALLOC
SPRITEBATCH_MEMCPY
SPRITEBATCH_MEMSET
SPRITEBATCH_MEMMOVE
SPRITEBATCH_ASSERT
SPRITEBATCH_ATLAS_FLIP_Y_AXIS_FOR_UV
SPRITEBATCH_ATLAS_EMPTY_COLOR
SPRITEBATCH_LOG
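For example, to route allocations through a custom allocator using the
`allocator_context` pointer supplied in `spritebatch_config_t`, something like the
following could be defined before including the implementation. This is a sketch
only -- `my_alloc` and `my_free` are hypothetical user functions:
#define SPRITEBATCH_MALLOC(size, ctx) my_alloc(size, ctx)
#define SPRITEBATCH_FREE(ptr, ctx) my_free(ptr, ctx)
#define SPRITEBATCH_IMPLEMENTATION
#include "cute_spritebatch.h"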
Revision history:
0.01 (11/20/2017) experimental release
1.00 (04/14/2018) initial release
1.01 (05/07/2018) modification for easier file embedding
1.02 (02/03/2019) moved def of spritebatch_t for easier embedding,
inverted get pixels callback to let users have an easier time
with memory management, added support for pixel padding along
the edges of all textures (useful for certain shader effects)
1.03 (08/18/2020) refactored `spritebatch_push` so that sprites can have userdata
1.04 (08/20/2021) qsort -> mergesort to avoid bugs, optional override
sprites_sorter_callback sorting routines provided by Kariem,
added new function `spritebatch_prefetch`
*/
#ifndef SPRITEBATCH_H
#ifndef SPRITEBATCH_U64
#define SPRITEBATCH_U64 unsigned long long
#endif // SPRITEBATCH_U64
typedef struct spritebatch_t spritebatch_t;
typedef struct spritebatch_config_t spritebatch_config_t;
typedef struct spritebatch_sprite_t spritebatch_sprite_t;
// Sprites will be pushed into the spritebatch with this struct. All the fields
// should be set before calling `spritebatch_push`, though `texture_id` and
// `sort_bits` can simply be set to zero.
//
// After sprites are pushed onto the spritebatch via `spritebatch_push`, they will
// be sorted, have `texture_id` assigned to a generated atlas, and be handed back to you
// via the `submit_batch_fn` callback.
struct spritebatch_sprite_t
{
// `image_id` must be a unique identifier for the image a sprite references.
// You must set this value!
SPRITEBATCH_U64 image_id;
// The `texture_id` can be set to zero before calling `spritebatch_push`. This value will be overwritten
// with a valid texture id of a generated atlas before batches are reported back to you.
SPRITEBATCH_U64 texture_id;
int w, h; // width and height of this sprite in pixels
float x, y; // x and y position
float sx, sy; // scale on x and y axis
float c, s; // cosine and sine (represents cos(angle) and sin(angle))
	float minx, miny;     // min uv coordinates -- Required only for premade atlas sprites, otherwise don't use because they will be overwritten
	float maxx, maxy;     // max uv coordinates -- Required only for premade atlas sprites, otherwise don't use because they will be overwritten
// This field is *completely optional* -- just set it to zero if you don't want to bother.
// User-defined sorting key, see: http://realtimecollisiondetection.net/blog/?p=86
int sort_bits;
// This is a *completely optional* field. The idea is that the `SPRITEBATCH_SPRITE_USERDATA`
// macro can be defined to insert arbitrary data into sprites. For example, if you want to
// allow individual sprites to have different alpha values, or a tint color, you can add
// in some floats here within a single struct. Internally this field is *never* accessed, and
// is simply handed back to you in each sprite via the `submit_batch_fn` callback. A short sketch follows this struct.
#ifdef SPRITEBATCH_SPRITE_USERDATA
SPRITEBATCH_SPRITE_USERDATA udata;
#endif
};
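// As a sketch of the udata mechanism described above (the struct name and fields below
// are hypothetical), defining the macro before including this header embeds user data
// directly in each sprite:
//
//     typedef struct { float r, g, b, a; } my_tint_t;
//     #define SPRITEBATCH_SPRITE_USERDATA my_tint_t
//     #include "cute_spritebatch.h"
//
//     // Later, when filling out a sprite to push:
//     // sprite.udata.r = sprite.udata.g = sprite.udata.b = sprite.udata.a = 1.0f;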
// Pushes a sprite onto an internal buffer. Does no other logic.
int spritebatch_push(spritebatch_t* sb, spritebatch_sprite_t sprite);
// Ensures the image associated with your unique `image_id` is loaded up into spritebatch. This
// function pretends to draw a sprite referencing `image_id` but doesn't actually do any
// drawing at all. Use this function as an optimization to pre-load images you know will be
// drawn very soon, e.g. prefetch all ten images within a single animation just as it starts
// playing.
void spritebatch_prefetch(spritebatch_t* sb, SPRITEBATCH_U64 image_id, int w, int h);
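// For example, to warm the cache for an animation's frames right before it starts playing
// (a sketch -- the `anim` structure and its fields are hypothetical):
//
//     for (int i = 0; i < anim->frame_count; ++i)
//         spritebatch_prefetch(sb, anim->frames[i].image_id, anim->frames[i].w, anim->frames[i].h);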
// If a match for `image_id` is found, the texture id and uv coordinates are looked up and returned
// as a sprite instance. This is sometimes useful to render sprites through an external mechanism,
// such as Dear ImGui.
spritebatch_sprite_t spritebatch_fetch(spritebatch_t* sb, SPRITEBATCH_U64 image_id, int w, int h);
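// A sketch of handing a fetched sprite to Dear ImGui (C++; the texture-id cast depends
// on your rendering backend's convention, so treat this as illustrative only):
//
//     spritebatch_sprite_t s = spritebatch_fetch(sb, image_id, w, h);
//     ImGui::Image((ImTextureID)(uintptr_t)s.texture_id, ImVec2((float)w, (float)h),
//                  ImVec2(s.minx, s.miny), ImVec2(s.maxx, s.maxy));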
// Increments internal timestamps on all textures, for use in `spritebatch_defrag`.
void spritebatch_tick(spritebatch_t* sb);
// Sorts the internal sprites and flushes the buffer built by `spritebatch_push`. Will call
// the `submit_batch_fn` function for each batch of sprites, handing each batch back as an array. Any `image_id`
// within the `spritebatch_push` buffer that does not yet have a texture handle will request pixels
// from the image via `get_pixels_fn` and request a texture handle via `generate_texture_handle_fn`.
// Returns the number of batches created and submitted.
int spritebatch_flush(spritebatch_t* sb);
// All textures created so far by `spritebatch_flush` will be considered as candidates for creating
// new internal texture atlases. Internal texture atlases compress images together inside of one
// texture to dramatically reduce draw calls. When an atlas is created, the most recently used `image_id`
// instances are prioritized, to ensure atlases are filled with images all drawn at the same time.
// As some textures cease to draw on screen, they "decay" over time. Once enough images in an atlas
// decay, the atlas is removed, and any "live" images in the atlas are used to create new atlases.
// Can be called every 1/N times `spritebatch_flush` is called.
int spritebatch_defrag(spritebatch_t* sb);
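// For example, a simple way to run defrag at a reduced frequency (every 4th tick here,
// an arbitrary choice for illustration; `frame_count` is a user-maintained counter):
//
//     spritebatch_tick(&batcher);
//     if ((frame_count++ & 3) == 0) spritebatch_defrag(&batcher);
//     spritebatch_flush(&batcher);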
int spritebatch_init(spritebatch_t* sb, spritebatch_config_t* config, void* udata);
void spritebatch_term(spritebatch_t* sb);
void spritebatch_register_premade_atlas(spritebatch_t* sb, SPRITEBATCH_U64 image_id, int w, int h);
void spritebatch_cleanup_premade_atlas(spritebatch_t* sb, SPRITEBATCH_U64 image_id);
// Sprite batches are submitted via a synchronous callback back to the user. This function is called
// from inside `spritebatch_flush`. Each time `submit_batch_fn` is called, an array of sprites
// is handed to the user. The sprites are intended to be further sorted by the user as desired
// (for example, additional sorting based on depth). `texture_w` and `texture_h` are the width and
// height of the texture the batch of sprites resides upon, which is useful for computing texel
// size or other measurements.
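// A minimal sketch of such a callback (`draw_sprite_quad` is a hypothetical user
// renderer function; every sprite in the array shares the same `texture_id`):
//
//     void my_report_batches_function(spritebatch_sprite_t* sprites, int count, int texture_w, int texture_h, void* udata)
//     {
//         (void)texture_w; (void)texture_h; (void)udata;
//         for (int i = 0; i < count; ++i)
//             draw_sprite_quad(sprites[i].texture_id, sprites[i].x, sprites[i].y, sprites[i].sx, sprites[i].sy,
//                              sprites[i].c, sprites[i].s, sprites[i].minx, sprites[i].miny, sprites[i].maxx, sprites[i].maxy);
//     }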
typedef void (submit_batch_fn)(spritebatch_sprite_t* sprites, int count, int texture_w, int texture_h, void* udata);
// cute_spritebatch.h needs to know how to get the pixels of an image, generate textures handles (for
// example glGenTextures for OpenGL), and destroy texture handles. These functions are all called
// from within the `spritebatch_defrag` function, and sometimes from `spritebatch_flush`.
// Called when the pixels are needed from the user. `image_id` maps to a unique image, and is *not*
// related to `texture_id` at all. `buffer` must be filled in with `bytes_to_fill` number of bytes.
// The user is assumed to know the width/height of the image, and can optionally verify that
// `bytes_to_fill` matches the user's w * h * stride for this particular image.
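// A sketch of a pixel callback that copies from images already resident in RAM
// (`my_image_t` and `my_image_lookup` are hypothetical; RGBA8 pixels, stride of 4, assumed):
//
//     void my_get_pixels_function(SPRITEBATCH_U64 image_id, void* buffer, int bytes_to_fill, void* udata)
//     {
//         my_image_t* img = my_image_lookup(udata, image_id);
//         assert(img && img->w * img->h * 4 == bytes_to_fill);
//         memcpy(buffer, img->pixels, (size_t)bytes_to_fill);
//     }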
typedef void (get_pixels_fn)(SPRITEBATCH_U64 image_id, void* buffer, int bytes_to_fill, void* udata);
// Called when a new texture handle is needed. This will happen whenever a new atlas is created,
// and whenever new `image_id`s first appear to cute_spritebatch, and have yet to find their way
// into an appropriate atlas.
typedef SPRITEBATCH_U64 (generate_texture_handle_fn)(void* pixels, int w, int h, void* udata);
// Called whenever a texture handle is ready to be free'd up. This happens whenever a particular image
// or a particular atlas has not been used for a while, and is ready to be released.
typedef void (destroy_texture_handle_fn)(SPRITEBATCH_U64 texture_id, void* udata);
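// As a sketch, OpenGL versions of the two texture-handle callbacks could look like this
// (standard GL calls; RGBA8 pixels assumed, error handling and filtering choices omitted):
//
//     SPRITEBATCH_U64 my_make_texture_handle_function(void* pixels, int w, int h, void* udata)
//     {
//         (void)udata;
//         GLuint id;
//         glGenTextures(1, &id);
//         glBindTexture(GL_TEXTURE_2D, id);
//         glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
//         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
//         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
//         return (SPRITEBATCH_U64)id;
//     }
//
//     void my_destroy_texture_handle_function(SPRITEBATCH_U64 texture_id, void* udata)
//     {
//         (void)udata;
//         GLuint id = (GLuint)texture_id;
//         glDeleteTextures(1, &id);
//     }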
// (Optional) If the user provides this callback, cute_spritebatch will call it to sort all of the sprites before the submit_batch
// callback is called. The intention of sorting is to minimize the submit_batch calls. cute_spritebatch
// provides its own internal sorting function which will be used if the user does not provide this callback.
//
// Example using std::sort (C++) - Please note the lambda needs to be a non-capturing one.
//
// config.sprites_sorter_callback = [](spritebatch_sprite_t* sprites, int count)
// {
// std::sort(sprites, sprites + count,
// [](const spritebatch_sprite_t& a, const spritebatch_sprite_t& b) {
// if (a.sort_bits < b.sort_bits) return true;
// if (a.sort_bits == b.sort_bits && a.texture_id < b.texture_id) return true;
// return false;
// });
// };
typedef void (sprites_sorter_fn)(spritebatch_sprite_t* sprites, int count);
// Sets all function pointers originally defined in the `config` struct when calling `spritebatch_init`.
// Useful if DLL's are reloaded, or swapped, etc.
void spritebatch_reset_function_ptrs(spritebatch_t* sb, submit_batch_fn* batch_callback, get_pixels_fn* get_pixels_callback, generate_texture_handle_fn* generate_texture_callback, destroy_texture_handle_fn* delete_texture_callback, sprites_sorter_fn* sprites_sorter_callback);
// Initializes a set of good default parameters. The user must still set
// the four callbacks inside of `config`.
void spritebatch_set_default_config(spritebatch_config_t* config);
struct spritebatch_config_t
{
int pixel_stride;
int atlas_width_in_pixels;
int atlas_height_in_pixels;
int atlas_use_border_pixels;
int ticks_to_decay_texture; // number of ticks it takes for a texture handle to be destroyed via `destroy_texture_handle_fn`
int lonely_buffer_count_till_flush; // Number of unique textures allowed to persist that are not a part of an atlas yet, each one allowed is another draw call.
// These are called "lonely textures", since they don't belong to any atlas yet. Set this to 0 if you want all textures to be
// immediately put into atlases. Setting a higher number, like 64, will buffer up 64 unique textures (which means up to an
// additional 64 draw calls) before flushing them into atlases. Too low of a lonely buffer count combined with a low tick
	// to decay rate will cause performance problems where atlases are constantly created and immediately destroyed -- you have
	// been warned! Use `SPRITEBATCH_LOG` to gain some insight on what's going on inside the spritebatch when tuning these settings
	// (a tuning sketch follows this struct).
float ratio_to_decay_atlas; // from 0 to 1, once ratio is less than `ratio_to_decay_atlas`, flush active textures in atlas to lonely buffer
float ratio_to_merge_atlases; // from 0 to 0.5, attempts to merge atlases with some ratio of empty space
submit_batch_fn* batch_callback;
get_pixels_fn* get_pixels_callback;
generate_texture_handle_fn* generate_texture_callback;
destroy_texture_handle_fn* delete_texture_callback;
sprites_sorter_fn* sprites_sorter_callback; // (Optional)
void* allocator_context;
};
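// As a tuning sketch building on the defaults (the values here are illustrative, not recommendations):
//
//     spritebatch_config_t config;
//     spritebatch_set_default_config(&config);
//     config.pixel_stride = 4;                    // RGBA8 pixels
//     config.atlas_width_in_pixels = 1024;
//     config.atlas_height_in_pixels = 1024;
//     config.lonely_buffer_count_till_flush = 64; // allow up to 64 un-atlased textures before flushing into atlases
//     config.ticks_to_decay_texture = 60 * 5;     // roughly 5 seconds at 60 ticks per second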
#define SPRITEBATCH_H
#endif
#if !defined(SPRITE_BATCH_INTERNAL_H)
// hashtable.h implementation by Mattias Gustavsson
// See: http://www.mattiasgustavsson.com/ and https://github.com/mattiasgustavsson/libs/blob/master/hashtable.h
// begin hashtable.h
/*
------------------------------------------------------------------------------
Licensing information can be found at the end of the file.
------------------------------------------------------------------------------
hashtable.h - v1.1 - Cache efficient hash table implementation for C/C++.
Do this:
#define HASHTABLE_IMPLEMENTATION
before you include this file in *one* C/C++ file to create the implementation.
*/
#ifndef hashtable_h
#define hashtable_h
#ifndef HASHTABLE_U64
#define HASHTABLE_U64 unsigned long long
#endif
typedef struct hashtable_t hashtable_t;
void hashtable_init( hashtable_t* table, int item_size, int initial_capacity, void* memctx );
void hashtable_term( hashtable_t* table );
void* hashtable_insert( hashtable_t* table, HASHTABLE_U64 key, void const* item );
void hashtable_remove( hashtable_t* table, HASHTABLE_U64 key );
void hashtable_clear( hashtable_t* table );
void* hashtable_find( hashtable_t const* table, HASHTABLE_U64 key );
int hashtable_count( hashtable_t const* table );
void* hashtable_items( hashtable_t const* table );
HASHTABLE_U64 const* hashtable_keys( hashtable_t const* table );
void hashtable_swap( hashtable_t* table, int index_a, int index_b );
#endif /* hashtable_h */
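/*
Quick orientation sketch for this embedded hashtable (used internally by
cute_spritebatch; `my_item_t` and `key` are hypothetical):

	hashtable_t table;
	hashtable_init( &table, sizeof( my_item_t ), 256, NULL );
	my_item_t item = { 0 };
	hashtable_insert( &table, (HASHTABLE_U64) key, &item );
	my_item_t* found = (my_item_t*) hashtable_find( &table, (HASHTABLE_U64) key );
	if( found ) hashtable_remove( &table, (HASHTABLE_U64) key );
	hashtable_term( &table );
*/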
/*
----------------------
IMPLEMENTATION
----------------------
*/
#ifndef hashtable_t_h
#define hashtable_t_h
#ifndef HASHTABLE_U32
#define HASHTABLE_U32 unsigned int
#endif
struct hashtable_internal_slot_t
{
HASHTABLE_U32 key_hash;
int item_index;
int base_count;
};
struct hashtable_t
{
void* memctx;
int count;
int item_size;
struct hashtable_internal_slot_t* slots;
int slot_capacity;
HASHTABLE_U64* items_key;
int* items_slot;
void* items_data;
int item_capacity;
void* swap_temp;
};
#endif /* hashtable_t_h */
// end hashtable.h (more later)
typedef struct
{
SPRITEBATCH_U64 image_id;
int sort_bits;
int w;
int h;
float x, y;
float sx, sy;
float c, s;
float premade_minx, premade_miny; // u coordinate for premade
float premade_maxx, premade_maxy; // v coordinate for premade
#ifdef SPRITEBATCH_SPRITE_USERDATA
SPRITEBATCH_SPRITE_USERDATA udata;
#endif
} spritebatch_internal_sprite_t;
typedef struct
{
int timestamp;
int w, h;
float minx, miny;
float maxx, maxy;
SPRITEBATCH_U64 image_id;
} spritebatch_internal_texture_t;
typedef struct spritebatch_internal_atlas_t
{
SPRITEBATCH_U64 texture_id;
float volume_ratio;
hashtable_t sprites_to_textures;
struct spritebatch_internal_atlas_t* next;
struct spritebatch_internal_atlas_t* prev;
} spritebatch_internal_atlas_t;
typedef struct
{
int timestamp;
int w, h;
SPRITEBATCH_U64 image_id;
SPRITEBATCH_U64 texture_id;
} spritebatch_internal_lonely_texture_t;
typedef struct
{
int w, h;
int mark_for_cleanup;
SPRITEBATCH_U64 image_id;
SPRITEBATCH_U64 texture_id;
} spritebatch_internal_premade_atlas;
struct spritebatch_t
{
int input_count;
int input_capacity;
spritebatch_internal_sprite_t* input_buffer;
int sprite_count;
int sprite_capacity;
spritebatch_sprite_t* sprites;
spritebatch_sprite_t* sprites_scratch;
int key_buffer_count;
int key_buffer_capacity;
SPRITEBATCH_U64* key_buffer;
int pixel_buffer_size; // number of pixels
void* pixel_buffer;
hashtable_t sprites_to_premade_textures;
hashtable_t sprites_to_lonely_textures;
hashtable_t sprites_to_atlases;
spritebatch_internal_atlas_t* atlases;
int pixel_stride;
int atlas_width_in_pixels;
int atlas_height_in_pixels;
int atlas_use_border_pixels;
int ticks_to_decay_texture;
int lonely_buffer_count_till_flush;
int lonely_buffer_count_till_decay;
float ratio_to_decay_atlas;
float ratio_to_merge_atlases;
submit_batch_fn* batch_callback;
get_pixels_fn* get_pixels_callback;
generate_texture_handle_fn* generate_texture_callback;
destroy_texture_handle_fn* delete_texture_callback;
sprites_sorter_fn* sprites_sorter_callback;
void* mem_ctx;
void* udata;
};
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#ifndef _CRT_NONSTDC_NO_DEPRECATE
#define _CRT_NONSTDC_NO_DEPRECATE
#endif
#ifndef SPRITEBATCH_MALLOC
#include <stdlib.h>
#define SPRITEBATCH_MALLOC(size, ctx) malloc(size)
#define SPRITEBATCH_FREE(ptr, ctx) free(ptr)
#endif
#ifndef SPRITEBATCH_MEMCPY
#include <string.h>
#define SPRITEBATCH_MEMCPY(dst, src, n) memcpy(dst, src, n)
#endif
#ifndef SPRITEBATCH_MEMSET
#include <string.h>
#define SPRITEBATCH_MEMSET(ptr, val, n) memset(ptr, val, n)
#endif
#ifndef SPRITEBATCH_MEMMOVE
#include <string.h>
#define SPRITEBATCH_MEMMOVE(dst, src, n) memmove(dst, src, n)
#endif
#ifndef SPRITEBATCH_ASSERT
#include <assert.h>
#define SPRITEBATCH_ASSERT(condition) assert(condition)
#endif
// flips output uv coordinate's y. Can be useful to "flip image on load"
#ifndef SPRITEBATCH_ATLAS_FLIP_Y_AXIS_FOR_UV
#define SPRITEBATCH_ATLAS_FLIP_Y_AXIS_FOR_UV 1
#endif
// flips output uv coordinate's y. Can be useful to "flip image on load"
#ifndef SPRITEBATCH_LONELY_FLIP_Y_AXIS_FOR_UV
#define SPRITEBATCH_LONELY_FLIP_Y_AXIS_FOR_UV 1
#endif
#ifndef SPRITEBATCH_ATLAS_EMPTY_COLOR
#define SPRITEBATCH_ATLAS_EMPTY_COLOR 0x00000000
#endif
#ifndef SPRITEBATCH_LOG
#if 0
#define SPRITEBATCH_LOG printf
#else
#define SPRITEBATCH_LOG(...)
#endif
#endif
#ifndef HASHTABLE_MEMSET
#define HASHTABLE_MEMSET(ptr, val, n) SPRITEBATCH_MEMSET(ptr, val, n)
#endif
#ifndef HASHTABLE_MEMCPY
#define HASHTABLE_MEMCPY(dst, src, n) SPRITEBATCH_MEMCPY(dst, src, n)
#endif
#ifndef HASHTABLE_MALLOC
#define HASHTABLE_MALLOC(ctx, size) SPRITEBATCH_MALLOC(size, ctx)
#endif
#ifndef HASHTABLE_FREE
#define HASHTABLE_FREE(ctx, ptr) SPRITEBATCH_FREE(ptr, ctx)
#endif
#define SPRITE_BATCH_INTERNAL_H
#endif
#ifdef SPRITEBATCH_IMPLEMENTATION
#ifndef SPRITEBATCH_IMPLEMENTATION_ONCE
#define SPRITEBATCH_IMPLEMENTATION_ONCE
#define HASHTABLE_IMPLEMENTATION
#ifdef HASHTABLE_IMPLEMENTATION
#ifndef HASHTABLE_IMPLEMENTATION_ONCE
#define HASHTABLE_IMPLEMENTATION_ONCE
// hashtable.h implementation by Mattias Gustavsson
// See: http://www.mattiasgustavsson.com/ and https://github.com/mattiasgustavsson/libs/blob/master/hashtable.h
// begin hashtable.h (continuing from first time)
#ifndef HASHTABLE_SIZE_T
#include <stddef.h>
#define HASHTABLE_SIZE_T size_t
#endif
#ifndef HASHTABLE_ASSERT
#include <assert.h>
#define HASHTABLE_ASSERT( x ) assert( x )
#endif
#ifndef HASHTABLE_MEMSET
#include <string.h>
#define HASHTABLE_MEMSET( ptr, val, cnt ) ( memset( ptr, val, cnt ) )
#endif
#ifndef HASHTABLE_MEMCPY
#include <string.h>
#define HASHTABLE_MEMCPY( dst, src, cnt ) ( memcpy( dst, src, cnt ) )
#endif
#ifndef HASHTABLE_MALLOC
#include <stdlib.h>
#define HASHTABLE_MALLOC( ctx, size ) ( malloc( size ) )
#define HASHTABLE_FREE( ctx, ptr ) ( free( ptr ) )
#endif
static HASHTABLE_U32 hashtable_internal_pow2ceil( HASHTABLE_U32 v )
{
--v;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
++v;
v += ( v == 0 );
return v;
}
void hashtable_init( hashtable_t* table, int item_size, int initial_capacity, void* memctx )
{
initial_capacity = (int)hashtable_internal_pow2ceil( initial_capacity >=0 ? (HASHTABLE_U32) initial_capacity : 32U );
table->memctx = memctx;
table->count = 0;
table->item_size = item_size;
table->slot_capacity = (int) hashtable_internal_pow2ceil( (HASHTABLE_U32) ( initial_capacity + initial_capacity / 2 ) );
int slots_size = (int)( table->slot_capacity * sizeof( *table->slots ) );
table->slots = (struct hashtable_internal_slot_t*) HASHTABLE_MALLOC( table->memctx, (HASHTABLE_SIZE_T) slots_size );
HASHTABLE_ASSERT( table->slots );
HASHTABLE_MEMSET( table->slots, 0, (HASHTABLE_SIZE_T) slots_size );
table->item_capacity = (int) hashtable_internal_pow2ceil( (HASHTABLE_U32) initial_capacity );
table->items_key = (HASHTABLE_U64*) HASHTABLE_MALLOC( table->memctx,
table->item_capacity * ( sizeof( *table->items_key ) + sizeof( *table->items_slot ) + table->item_size ) + table->item_size );
HASHTABLE_ASSERT( table->items_key );
table->items_slot = (int*)( table->items_key + table->item_capacity );
table->items_data = (void*)( table->items_slot + table->item_capacity );
table->swap_temp = (void*)( ( (uintptr_t) table->items_data ) + table->item_size * table->item_capacity );
}
void hashtable_term( hashtable_t* table )
{
HASHTABLE_FREE( table->memctx, table->items_key );
HASHTABLE_FREE( table->memctx, table->slots );
}
// from https://gist.github.com/badboy/6267743
static HASHTABLE_U32 hashtable_internal_calculate_hash( HASHTABLE_U64 key )
{
key = ( ~key ) + ( key << 18 );
key = key ^ ( key >> 31 );
key = key * 21;
key = key ^ ( key >> 11 );
key = key + ( key << 6 );
key = key ^ ( key >> 22 );
HASHTABLE_ASSERT( key );
return (HASHTABLE_U32) key;
}
static int hashtable_internal_find_slot( hashtable_t const* table, HASHTABLE_U64 key )
{
int const slot_mask = table->slot_capacity - 1;
HASHTABLE_U32 const hash = hashtable_internal_calculate_hash( key );
int const base_slot = (int)( hash & (HASHTABLE_U32)slot_mask );
int base_count = table->slots[ base_slot ].base_count;
int slot = base_slot;
while( base_count > 0 )
{
HASHTABLE_U32 slot_hash = table->slots[ slot ].key_hash;
if( slot_hash )
{
int slot_base = (int)( slot_hash & (HASHTABLE_U32)slot_mask );
if( slot_base == base_slot )
{
HASHTABLE_ASSERT( base_count > 0 );
--base_count;
if( slot_hash == hash && table->items_key[ table->slots[ slot ].item_index ] == key )
return slot;
}
}
slot = ( slot + 1 ) & slot_mask;
}
return -1;
}
static void hashtable_internal_expand_slots( hashtable_t* table )
{
int const old_capacity = table->slot_capacity;
struct hashtable_internal_slot_t* old_slots = table->slots;
table->slot_capacity *= 2;
int const slot_mask = table->slot_capacity - 1;
int const size = (int)( table->slot_capacity * sizeof( *table->slots ) );
table->slots = (struct hashtable_internal_slot_t*) HASHTABLE_MALLOC( table->memctx, (HASHTABLE_SIZE_T) size );
HASHTABLE_ASSERT( table->slots );
HASHTABLE_MEMSET( table->slots, 0, (HASHTABLE_SIZE_T) size );
for( int i = 0; i < old_capacity; ++i )
{
HASHTABLE_U32 const hash = old_slots[ i ].key_hash;
if( hash )
{
int const base_slot = (int)( hash & (HASHTABLE_U32)slot_mask );
int slot = base_slot;
while( table->slots[ slot ].key_hash )
slot = ( slot + 1 ) & slot_mask;
table->slots[ slot ].key_hash = hash;
int item_index = old_slots[ i ].item_index;
table->slots[ slot ].item_index = item_index;
table->items_slot[ item_index ] = slot;
++table->slots[ base_slot ].base_count;
}
}
HASHTABLE_FREE( table->memctx, old_slots );
}
static void hashtable_internal_expand_items( hashtable_t* table )
{
table->item_capacity *= 2;
HASHTABLE_U64* const new_items_key = (HASHTABLE_U64*) HASHTABLE_MALLOC( table->memctx,
table->item_capacity * ( sizeof( *table->items_key ) + sizeof( *table->items_slot ) + table->item_size ) + table->item_size);
HASHTABLE_ASSERT( new_items_key );
int* const new_items_slot = (int*)( new_items_key + table->item_capacity );
void* const new_items_data = (void*)( new_items_slot + table->item_capacity );
void* const new_swap_temp = (void*)( ( (uintptr_t) new_items_data ) + table->item_size * table->item_capacity );
HASHTABLE_MEMCPY( new_items_key, table->items_key, table->count * sizeof( *table->items_key ) );
HASHTABLE_MEMCPY( new_items_slot, table->items_slot, table->count * sizeof( *table->items_slot ) );
HASHTABLE_MEMCPY( new_items_data, table->items_data, (HASHTABLE_SIZE_T) table->count * table->item_size );
HASHTABLE_FREE( table->memctx, table->items_key );
table->items_key = new_items_key;
table->items_slot = new_items_slot;
table->items_data = new_items_data;
table->swap_temp = new_swap_temp;
}
void* hashtable_insert( hashtable_t* table, HASHTABLE_U64 key, void const* item )
{
HASHTABLE_ASSERT( hashtable_internal_find_slot( table, key ) < 0 );
if( table->count >= ( table->slot_capacity - table->slot_capacity / 3 ) )
hashtable_internal_expand_slots( table );
int const slot_mask = table->slot_capacity - 1;
HASHTABLE_U32 const hash = hashtable_internal_calculate_hash( key );
int const base_slot = (int)( hash & (HASHTABLE_U32)slot_mask );
int base_count = table->slots[ base_slot ].base_count;
int slot = base_slot;
int first_free = slot;
while( base_count )
{
HASHTABLE_U32 const slot_hash = table->slots[ slot ].key_hash;
if( slot_hash == 0 && table->slots[ first_free ].key_hash != 0 ) first_free = slot;
int slot_base = (int)( slot_hash & (HASHTABLE_U32)slot_mask );
if( slot_base == base_slot )
--base_count;
slot = ( slot + 1 ) & slot_mask;
}
slot = first_free;
while( table->slots[ slot ].key_hash )
slot = ( slot + 1 ) & slot_mask;
if( table->count >= table->item_capacity )
hashtable_internal_expand_items( table );
HASHTABLE_ASSERT( !table->slots[ slot ].key_hash && ( hash & (HASHTABLE_U32) slot_mask ) == (HASHTABLE_U32) base_slot );
HASHTABLE_ASSERT( hash );
table->slots[ slot ].key_hash = hash;
table->slots[ slot ].item_index = table->count;
++table->slots[ base_slot ].base_count;
void* dest_item = (void*)( ( (uintptr_t) table->items_data ) + table->count * table->item_size );
HASHTABLE_MEMCPY( dest_item, item, (HASHTABLE_SIZE_T) table->item_size );
table->items_key[ table->count ] = key;
table->items_slot[ table->count ] = slot;
++table->count;
return dest_item;
}
void hashtable_remove( hashtable_t* table, HASHTABLE_U64 key )
{
int const slot = hashtable_internal_find_slot( table, key );
HASHTABLE_ASSERT( slot >= 0 );
int const slot_mask = table->slot_capacity - 1;
HASHTABLE_U32 const hash = table->slots[ slot ].key_hash;
int const base_slot = (int)( hash & (HASHTABLE_U32) slot_mask );
HASHTABLE_ASSERT( hash );
--table->slots[ base_slot ].base_count;
table->slots[ slot ].key_hash = 0;
int index = table->slots[ slot ].item_index;
int last_index = table->count - 1;
if( index != last_index )
{
table->items_key[ index ] = table->items_key[ last_index ];
table->items_slot[ index ] = table->items_slot[ last_index ];
void* dst_item = (void*)( ( (uintptr_t) table->items_data ) + index * table->item_size );
void* src_item = (void*)( ( (uintptr_t) table->items_data ) + last_index * table->item_size );
HASHTABLE_MEMCPY( dst_item, src_item, (HASHTABLE_SIZE_T) table->item_size );
table->slots[ table->items_slot[ last_index ] ].item_index = index;
}
--table->count;
}
void hashtable_clear( hashtable_t* table )
{
table->count = 0;
HASHTABLE_MEMSET( table->slots, 0, table->slot_capacity * sizeof( *table->slots ) );
}
void* hashtable_find( hashtable_t const* table, HASHTABLE_U64 key )
{
int const slot = hashtable_internal_find_slot( table, key );
if( slot < 0 ) return 0;
int const index = table->slots[ slot ].item_index;
void* const item = (void*)( ( (uintptr_t) table->items_data ) + index * table->item_size );
return item;
}
int hashtable_count( hashtable_t const* table )
{
return table->count;
}
void* hashtable_items( hashtable_t const* table )
{
return table->items_data;
}
HASHTABLE_U64 const* hashtable_keys( hashtable_t const* table )
{
return table->items_key;
}
void hashtable_swap( hashtable_t* table, int index_a, int index_b )
{
if( index_a < 0 || index_a >= table->count || index_b < 0 || index_b >= table->count ) return;
int slot_a = table->items_slot[ index_a ];
int slot_b = table->items_slot[ index_b ];
table->items_slot[ index_a ] = slot_b;
table->items_slot[ index_b ] = slot_a;
HASHTABLE_U64 temp_key = table->items_key[ index_a ];
table->items_key[ index_a ] = table->items_key[ index_b ];
table->items_key[ index_b ] = temp_key;
void* item_a = (void*)( ( (uintptr_t) table->items_data ) + index_a * table->item_size );
void* item_b = (void*)( ( (uintptr_t) table->items_data ) + index_b * table->item_size );
HASHTABLE_MEMCPY( table->swap_temp, item_a, table->item_size );
HASHTABLE_MEMCPY( item_a, item_b, table->item_size );
HASHTABLE_MEMCPY( item_b, table->swap_temp, table->item_size );
table->slots[ slot_a ].item_index = index_b;
table->slots[ slot_b ].item_index = index_a;
}
#endif /* HASHTABLE_IMPLEMENTATION */
#endif // HASHTABLE_IMPLEMENTATION_ONCE
/*
contributors:
Randy Gaul (hashtable_clear, hashtable_swap )
revision history:
1.1 added hashtable_clear, hashtable_swap
1.0 first released version
*/
/*
------------------------------------------------------------------------------
This software is available under 2 licenses - you may choose the one you like.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT License
Copyright (c) 2015 Mattias Gustavsson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/
// end of hashtable.h
#include <stdbool.h>
bool sprite_batch_internal_use_scratch_buffer(spritebatch_t* sb)
{
return sb->sprites_sorter_callback == 0;
}
int spritebatch_init(spritebatch_t* sb, spritebatch_config_t* config, void* udata)
{
// read config params
if (!config || !sb) return 1;
sb->pixel_stride = config->pixel_stride;
sb->atlas_width_in_pixels = config->atlas_width_in_pixels;
sb->atlas_height_in_pixels = config->atlas_height_in_pixels;
sb->atlas_use_border_pixels = config->atlas_use_border_pixels;
sb->ticks_to_decay_texture = config->ticks_to_decay_texture;
sb->lonely_buffer_count_till_flush = config->lonely_buffer_count_till_flush;
sb->lonely_buffer_count_till_decay = sb->lonely_buffer_count_till_flush / 2;
if (sb->lonely_buffer_count_till_decay <= 0) sb->lonely_buffer_count_till_decay = 1;
sb->ratio_to_decay_atlas = config->ratio_to_decay_atlas;
sb->ratio_to_merge_atlases = config->ratio_to_merge_atlases;
sb->batch_callback = config->batch_callback;
sb->get_pixels_callback = config->get_pixels_callback;
sb->generate_texture_callback = config->generate_texture_callback;
sb->delete_texture_callback = config->delete_texture_callback;
sb->sprites_sorter_callback = config->sprites_sorter_callback;
sb->mem_ctx = config->allocator_context;
sb->udata = udata;
if (sb->atlas_width_in_pixels < 1 || sb->atlas_height_in_pixels < 1) return 1;
if (sb->ticks_to_decay_texture < 1) return 1;
if (sb->ratio_to_decay_atlas < 0 || sb->ratio_to_decay_atlas > 1.0f) return 1;
if (sb->ratio_to_merge_atlases < 0 || sb->ratio_to_merge_atlases > 0.5f) return 1;
if (!sb->batch_callback) return 1;
if (!sb->get_pixels_callback) return 1;
if (!sb->generate_texture_callback) return 1;
if (!sb->delete_texture_callback) return 1;
// initialize input buffer
sb->input_count = 0;
sb->input_capacity = 1024;
sb->input_buffer = (spritebatch_internal_sprite_t*)SPRITEBATCH_MALLOC(sizeof(spritebatch_internal_sprite_t) * sb->input_capacity, sb->mem_ctx);
if (!sb->input_buffer) return 1;