// ~geometry_batcher(): release the per-vertex radius array
if (radius_list != NULL) {
	vm_free(radius_list);
	radius_list = NULL;
}
void geometry_batcher::allocate_internal(int n_verts)
{
	// grow only when the request exceeds the current allocation
	if (n_verts > n_allocated) {
		// drop the old array; this path does not preserve queued contents
		if (radius_list != NULL) {
			vm_free(radius_list);
			radius_list = NULL;
		}

		// ...
		radius_list = (float *) vm_malloc( sizeof(float) * n_verts );

		Verify( (radius_list != NULL) );

		memset( vert, 0, sizeof(vertex) * n_verts );
		memset( radius_list, 0, sizeof(float) * n_verts );

		n_allocated = n_verts;
	}
	// ...
}
// allocate(): quads are emitted as two triangles (six verts), free triangles as three
to_alloc += (quad * 6);
// ...
to_alloc += (n_tri * 3);

allocate_internal(to_alloc);
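// A minimal sketch of the sizing rule both snippets above implement, assuming
// only that a quad is emitted as two independent triangles (helper name hypothetical):
static int batch_verts_needed(int n_quads, int n_tris)
{
	return (n_quads * 6) + (n_tris * 3);	// 6 verts per quad, 3 per triangle
}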
// add_allocate(): same sizing, but verts already queued must survive the grow
int to_alloc = (n_to_render * 3);
// ...
to_alloc += (quad * 6);
// ...
to_alloc += (n_tri * 3);
// ...
float *old_radius_list = radius_list;

if (to_alloc > n_allocated) {
	// ...
	radius_list = (float *) vm_malloc( sizeof(float) * to_alloc );

	Verify( (radius_list != NULL) );

	memset( vert, 0, sizeof(vertex) * to_alloc );
	memset( radius_list, 0, sizeof(float) * to_alloc );

	// copy the queued verts into the new storage, then release the old arrays
	if (old_vert != NULL) {
		memcpy( vert, old_vert, sizeof(vertex) * n_to_render * 3 );
		vm_free(old_vert);
	}

	if (old_radius_list != NULL) {
		memcpy( radius_list, old_radius_list, sizeof(float) * n_to_render * 3 );
		vm_free(old_radius_list);
	}

	n_allocated = to_alloc;
}
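// The grow-and-copy above is a zero-filling realloc in disguise. A
// self-contained sketch of the same pattern (names hypothetical):
#include <cstdlib>
#include <cstring>

static float *grow_zeroed(float *old_buf, size_t old_count, size_t new_count)
{
	float *buf = (float *) calloc(new_count, sizeof(float));	// new storage, zeroed
	if (buf != NULL && old_buf != NULL) {
		memcpy(buf, old_buf, old_count * sizeof(float));	// preserve queued data
		free(old_buf);
	}
	return buf;
}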
// operator=(): deep-copy the other batcher's queued geometry
n_to_render = geo.n_to_render;
n_allocated = geo.n_allocated;
use_radius = geo.use_radius;

if (n_allocated > 0) {
	// ...
	radius_list = (float *) vm_malloc( sizeof(float) * n_allocated );

	memcpy( vert, geo.vert, sizeof(vertex) * n_allocated );
	memcpy( radius_list, geo.radius_list, sizeof(float) * n_allocated );
}
// draw_bitmap(): append a camera-facing quad (two tris, six verts) at the
// end of the queue; R holds the soft-particle radius for each vert
vec3d fvec, rvec, uvec;
vertex *P = &vert[n_to_render * 3];
float *R = &radius_list[n_to_render * 3];
// ...
for (int i = 0; i < 6; i++) {
	// ...
}
// rotated variant: the input angle is wrapped into [0, PI2) first
else if ( angle > PI2 )
	// ...

vec3d fvec, rvec, uvec;
vertex *P = &vert[n_to_render * 3];
float *R = &radius_list[n_to_render * 3];
// ...
for (int i = 0; i < 6; i++) {
	// ...
}
// draw_tri(): a single triangle, three verts
vertex *P = &vert[n_to_render * 3];
// ...
for (int i = 0; i < 3; i++)
	// ...
// draw_quad(): same append point, four corners emitted as two triangles
vertex *P = &vert[n_to_render * 3];
// draw_beam(): build an oriented ribbon between start and end; every vert
// shares the same single color value
vertex *P = &vert[n_to_render * 3];
float *R = &radius_list[n_to_render * 3];

vec3d fvec, uvecs, uvece, evec;
// ...
for (int i = 0; i < 6; i++) {
	P[i].r = P[i].g = P[i].b = P[i].a = _color;
	// ...
}
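// Every six-vert loop above walks one quad stored as two triangles. One common
// triangulation, with a plain stand-in type (vec3f and quad_to_tris are
// hypothetical; the engine's exact winding is not shown above):
struct vec3f { float x, y, z; };

static void quad_to_tris(const vec3f c[4], vec3f out[6])
{
	out[0] = c[0]; out[1] = c[1]; out[2] = c[2];	// first triangle
	out[3] = c[0]; out[4] = c[2]; out[5] = c[3];	// second triangle
}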
// draw_laser(): orient the laser quad toward the eye
vec3d uvec, fvec, rvec, center, reye;
// ...
vertex *pts = &vert[n_to_render * 3];
// load_buffer(): append this batch's verts to the shared effect_vertex
// buffer, remembering buffer_offset so render_buffer() can draw just our range
int verts_to_render = n_to_render * 3;

buffer_offset = *n_verts;

for ( i = 0; i < verts_to_render; ++i ) {
	// ...
	if ( use_radius && radius_list != NULL ) {
		buffer[buffer_offset+i].radius = radius_list[i];
	} else {
		buffer[buffer_offset+i].radius = 0.0f;
	}

	buffer[buffer_offset+i].r = vert[i].r;
	buffer[buffer_offset+i].g = vert[i].g;
	buffer[buffer_offset+i].b = vert[i].b;
	buffer[buffer_offset+i].a = vert[i].a;
}

*n_verts = *n_verts + verts_to_render;
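// A guess at the destination record load_buffer() fills; only the fields
// assigned above are shown, and the layout is hypothetical (the real
// effect_vertex lives in the graphics headers):
struct effect_vertex_sketch {
	float radius;			// soft-particle radius, 0.0f when unused
	unsigned char r, g, b, a;	// per-vertex color copied from vert[i]
	// ... position/texcoord fields elided
};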
// early-outs shared by the render paths: nothing loaded into the shared
// buffer, nothing queued, or no GPU buffer to draw from
if ( buffer_offset < 0 ) {
	return;
}
// ...
if ( !n_to_render ) {
	return;
}
// ...
if ( buffer_handle < 0 ) {
	return;
}

// geometry_shader_batcher has the same guards, testing its vector instead
if ( buffer_offset < 0 ) {
	return;
}
// ...
if ( !vertices.size() ) {
	return;
}
// ...
if ( buffer_handle < 0 ) {
	return;
}
// geometry_shader_batcher::load_buffer(): particle_pnt records copy straight in
int verts_to_render = (int) vertices.size();

buffer_offset = *n_verts;

for ( i = 0; i < verts_to_render; ++i ) {
	buffer[buffer_offset+i] = vertices[i];
}

*n_verts = *n_verts + verts_to_render;
// geometry_shader_batcher::draw_bitmap(): store one point per sprite and let
// the geometry shader expand it to a quad; orient picks one of four up vectors
vec3d up = {{{0.0f, 1.0f, 0.0f}}};
// ...
new_particle.size = rad;

int direction = orient % 4;

if ( direction == 1 ) {
	// ...
} else if ( direction == 2 ) {
	// ...
} else if ( direction == 3 ) {
	// ...
}

new_particle.up = up;

vertices.push_back(new_particle);
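// A hypothetical CPU-side mirror of the record pushed above: one entry per
// sprite, expanded to a camera-facing quad later in the geometry shader
// (field types assumed; only size and up are assigned in the visible code;
// vec3f is the stand-in defined in the earlier sketch):
struct particle_pnt_sketch {
	vec3f position;	// sprite center
	float size;	// new_particle.size = rad
	vec3f up;	// orientation hint chosen from orient % 4
};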
// every batch_add_* entry point starts with the same find-or-create lookup,
// keyed on the texture handle (repeated verbatim in batch_add_bitmap,
// batch_add_bitmap_rotated, batch_add_laser, batch_add_tri, batch_add_quad, ...):
if ( !geometry_map.empty() && it != geometry_map.end() ) {
	// ... reuse the existing batch_item
}

// the geometry-shader path keeps its own map:
if ( !geometry_shader_map.empty() && it != geometry_shader_map.end() ) {
	// ...
} else {
	item = &geometry_shader_map[texture];
}
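// Restating the find-or-create idiom with a std::map-style container like the
// SCP_map used here (sketch only; batch_item reduced to the texture key):
#include <map>

struct batch_item_sketch { int texture = -1; };
static std::map<int, batch_item_sketch> sketch_map;

static batch_item_sketch *find_or_create(int texture)
{
	std::map<int, batch_item_sketch>::iterator it = sketch_map.find(texture);
	if ( !sketch_map.empty() && it != sketch_map.end() ) {
		return &it->second;				// reuse the existing batch
	}
	batch_item_sketch *item = &sketch_map[texture];	// operator[] default-constructs
	item->texture = texture;
	return item;
}
// Note the empty() test is redundant once find() is used, but it is harmless.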
// batch_add_polygon(): reject degenerate sizes, then emit the four corners
if (width == 0 || height == 0)
	// ... nothing to draw

const int NUM_VERTICES = 4;
// ...
for (int i = 0; i < NUM_VERTICES; i++)
	// ...
// two more entry points repeat the lookup; beam-style calls store their
// intensity as the batch alpha:
if ( !geometry_map.empty() && it != geometry_map.end() ) {
	// ...
}
// ...
item->alpha = intensity;
// the render/load passes over geometry_map: batch_render_lasers() and
// batch_load_buffer_lasers() keep only laser batches, and every pass skips
// batches with nothing queued
if ( !bi->second.laser )
	continue;

if ( !bi->second.batch.need_to_render() )
	continue;

Assert( bi->second.texture >= 0 );
// ...

// rendering prefers the pre-filled vertex buffer, falling back to immediate mode
if ( buffer_handle >= 0 ) {
	bi->second.batch.render_buffer(buffer_handle, bi->second.tmap_flags);
} else {
	bi->second.batch.render( bi->second.tmap_flags );
}

// the *_geometry_map_bitmaps() and geometry-shader passes invert the first guard:
if ( bi->second.laser )
	continue;
// ...

// and the batch_load_buffer_* variants fill the shared buffer instead of drawing:
bi->second.batch.load_buffer(buffer, n_verts);
// geometry_batch_render(): bail without a stream buffer, then grow the
// CPU-side staging buffer if this frame needs more particle_pnt records
if ( stream_buffer < 0 ) {
	return;
}
// ...
if ( Batch_geometry_buffer_size < (n_to_render * sizeof(particle_pnt)) ) {
	if ( Batch_geometry_buffer != NULL ) {
		vm_free(Batch_geometry_buffer);
	}

	Batch_geometry_buffer_size = n_to_render * sizeof(particle_pnt);
	Batch_geometry_buffer = vm_malloc(Batch_geometry_buffer_size);
}
// batch_render_all(): same grow-only idiom for the effect_vertex staging buffer
if ( stream_buffer >= 0 ) {
	// ...
	if ( Batch_buffer_size < (n_to_render * sizeof(effect_vertex)) ) {
		if ( Batch_buffer != NULL ) {
			vm_free(Batch_buffer);
		}

		Batch_buffer_size = n_to_render * sizeof(effect_vertex);
		Batch_buffer = vm_malloc(Batch_buffer_size);
	}
	// ...
	Assert(n_verts <= n_to_render);
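// Both resize blocks follow the same grow-only staging-buffer idiom; a
// self-contained sketch (names hypothetical):
#include <cstdlib>

static void *Staging_buffer = NULL;
static size_t Staging_size = 0;

static void *staging_reserve(size_t needed)
{
	if (Staging_size < needed) {
		free(Staging_buffer);			// free(NULL) is a no-op
		Staging_buffer = malloc(needed);	// grow, never shrink
		Staging_size = (Staging_buffer != NULL) ? needed : 0;
	}
	return Staging_buffer;
}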
// batch_render_close(): drop every batch and release the staging memory
geometry_map.clear();
distortion_map.clear();
// ...
if ( Batch_buffer != NULL ) {
	vm_free(Batch_buffer);
	Batch_buffer = NULL;
}

Batch_buffer_size = 0;
// distortion_add_bitmap_rotated() / distortion_add_beam(): the distortion
// path mirrors the geometry lookups, keyed off distortion_map
if ( !distortion_map.empty() && it != distortion_map.end() ) {
	// ...
} else {
	item = &distortion_map[texture];
}
// ...
if ( !distortion_map.empty() && it != distortion_map.end() ) {
	// ...
} else {
	item = &distortion_map[texture];
}
// ...
item->alpha = intensity;
// batch_render_distortion_map_bitmaps() / batch_load_buffer_distortion_map_bitmaps():
// same guard sequence as the geometry passes, skipping laser batches
if ( bi->second.laser )
	continue;

if ( !bi->second.batch.need_to_render() )
	continue;

Assert( bi->second.texture >= 0 );
// ...
if ( buffer_handle >= 0 ) {
	bi->second.batch.render_buffer(buffer_handle, bi->second.tmap_flags);
} else {
	bi->second.batch.render( bi->second.tmap_flags );
}
// ...
bi->second.batch.load_buffer(buffer, n_verts);
// batch_get_size(): need_to_render() reports queued primitives per batch;
// the * 3 at the end converts triangles to verts
int n_to_render = 0;

for (bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
	n_to_render += bi->second.batch.need_to_render();
}

for (bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {
	if ( bi->second.laser )
		continue;

	n_to_render += bi->second.batch.need_to_render();
}

return n_to_render * 3;

// geometry_batch_get_size(): the same count over geometry_shader_map
int n_to_render = 0;

for (bi = geometry_shader_map.begin(); bi != geometry_shader_map.end(); ++bi) {
	n_to_render += bi->second.batch.need_to_render();
}
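// Putting the pieces together: a frame's worth of batching as suggested by
// the entry points listed below (arguments illustrative):
//
//   batch_add_bitmap(tex, TMAP_FLAG_TEXTURED | TMAP_FLAG_SOFT_QUAD, &pnt, 0, rad, alpha, depth);
//   batch_add_laser(tex, &p0, w0, &p1, w1, 255, 128, 64);
//   batch_render_all(stream_buffer);	// grows Batch_buffer, loads it, renders each map
//   batch_render_close();		// at shutdown: clears the maps, frees the buffers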
// Declarations referenced above.

// geometry_batcher / geometry_shader_batcher members (draw_bitmap and
// render_buffer exist on both classes with the same signature):
void allocate(int quad, int n_tri=0)
void add_allocate(int quad, int n_tri=0)
const geometry_batcher & operator=(const geometry_batcher &geo)
void draw_bitmap(vertex *position, int orient, float rad, float depth=0)
void draw_tri(vertex *verts)
void draw_quad(vertex *verts)
void draw_beam(vec3d *start, vec3d *end, float width, float intensity=1.0f, float offset=0.0f)
float draw_laser(vec3d *p0, float width1, vec3d *p1, float width2, int r, int g, int b)
void load_buffer(effect_vertex *buffer, int *n_verts)
void load_buffer(particle_pnt *buffer, int *n_verts)
void render(int flags, float radius=0.0f)
void render_buffer(int buffer_handle, int flags)

// batching entry points:
int batch_add_bitmap(int texture, int tmap_flags, vertex *pnt, int orient, float rad, float alpha, float depth)
int batch_add_bitmap_rotated(int texture, int tmap_flags, vertex *pnt, float angle, float rad, float alpha, float depth)
float batch_add_laser(int texture, vec3d *p0, float width1, vec3d *p1, float width2, int r, int g, int b)
int batch_add_tri(int texture, int tmap_flags, vertex *verts, float alpha)
int batch_add_quad(int texture, int tmap_flags, vertex *verts, float alpha)
int batch_add_polygon(int texture, int tmap_flags, vec3d *pos, matrix *orient, float width, float height, float alpha)
int batch_add_beam(int texture, int tmap_flags, vec3d *start, vec3d *end, float width, float intensity)
int geometry_batch_add_bitmap(int texture, int tmap_flags, vertex *pnt, int orient, float rad, float alpha, float depth)
int distortion_add_bitmap_rotated(int texture, int tmap_flags, vertex *pnt, float angle, float rad, float alpha, float depth)
int distortion_add_beam(int texture, int tmap_flags, vec3d *start, vec3d *end, float width, float intensity, float offset)
int geometry_batch_get_size()
void batch_load_buffer_lasers(effect_vertex *buffer, int *n_verts)
void batch_load_buffer_geometry_map_bitmaps(effect_vertex *buffer, int *n_verts)
void batch_load_buffer_geometry_shader_map_bitmaps(particle_pnt *buffer, int *n_verts)
void batch_load_buffer_distortion_map_bitmaps(effect_vertex *buffer, int *n_verts)
void batch_render_lasers(int buffer_handle)
void batch_render_geometry_map_bitmaps(int buffer_handle)
void batch_render_geometry_shader_map_bitmaps(int buffer_handle)
void batch_render_distortion_map_bitmaps(int buffer_handle)
void batch_render_all(int stream_buffer)
void geometry_batch_render(int stream_buffer)
void batch_render_close()

// math and graphics helpers:
void vm_vec_scale_add(vec3d *dest, const vec3d *src1, const vec3d *src2, float k)
void vm_vec_add2(vec3d *dest, const vec3d *src)
void vm_vec_sub(vec3d *dest, const vec3d *src0, const vec3d *src1)
vec3d * vm_vec_avg(vec3d *dest, const vec3d *src0, const vec3d *src1)
vec3d * vm_vec_cross(vec3d *dest, const vec3d *src0, const vec3d *src1)
vec3d * vm_vec_unrotate(vec3d *dest, const vec3d *src, const matrix *m)
float vm_vec_normalize(vec3d *v)
float vm_vec_normalize_safe(vec3d *v)
void vm_rot_point_around_line(vec3d *out, const vec3d *in, float angle, const vec3d *line_point, const vec3d *line_dir)
ubyte g3_transfer_vertex(vertex *dest, const vec3d *src)
void gr_set_bitmap(int bitmap_num, int alphablend_mode, int bitblt_mode, float alpha)
const char * bm_get_filename(int handle)	// filename of the bitmap indexed by handle, which must exist

// flags, macros, and globals:
#define TMAP_FLAG_TEXTURED
#define TMAP_FLAG_XPARENT
#define TMAP_FLAG_GOURAUD
#define TMAP_FLAG_CORRECT
#define TMAP_FLAG_TRILIST
#define TMAP_FLAG_POINTLIST
#define TMAP_FLAG_SOFT_QUAD
#define TMAP_FLAG_VERTEX_GEN
#define TMAP_FLAG_DISTORTION
#define TMAP_FLAG_DISTORTION_THRUSTER
#define TMAP_HTL_3D_UNLIT
#define GR_ALPHABLEND_NONE
#define GR_ALPHABLEND_FILTER
#define GR_BITBLT_MODE_NORMAL
#define OGL_EXT_GEOMETRY_SHADER4
#define OGL_EXT_FRAMEBUFFER_OBJECT
#define Is_Extension_Enabled(x)
#define Assertion(expr, msg, ...)
#define gr_update_buffer_object
#define gr_render_stream_buffer
int Cmdline_softparticles
bool Cmdline_no_geo_sdr_effects
bool Use_Shaders_for_effect_rendering
float Physics_viewer_bank
void * Batch_geometry_buffer
size_t Batch_geometry_buffer_size
geometry_shader_batcher batch
struct effect_vertex