Index: code/globalincs/vmallocator.h
===================================================================
--- code/globalincs/vmallocator.h	(revision 9388)
+++ code/globalincs/vmallocator.h	(working copy)
@@ -9,22 +9,13 @@
 #include <string>
 #include <queue>
 
-#if defined __GNUC__
-#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40300
+
+#ifndef WIN32
 #include <tr1/unordered_map>
-#define SCP_hash_map std::tr1::unordered_map
-#elif GCC_VERSION < 40300 || __clang__
-#include <ext/hash_map>
-#define SCP_hash_map __gnu_cxx::hash_map
-#endif // GCC_VERSION || __clang__
-#endif // __GNUC__
+#else // WIN32
+#include <unordered_map>
+#endif // WIN32
 
-#if ! defined __GNUC__
-#include <hash_map>
-#define SCP_hash_map stdext::hash_map
-#endif // ! defined __GNUC__
-
 #if defined(_MSC_VER) && _MSC_VER >= 1400 || !defined(_MSC_VER)
 
 #define DESTROY( type, p ) (p)->~type( )
@@ -123,6 +114,9 @@
 template< typename T >
 class SCP_queue : public std::queue< T, std::deque< T, SCP_vm_allocator< T > > > { };
 
+template< typename T, typename U >
+class SCP_unordered_map : public std::tr1::unordered_map<T, U, std::tr1::hash<T>, std::equal_to<T>, SCP_vm_allocator<std::pair<const T, U> > > { };
+
 template <class T1, class T2>
 bool operator==(const SCP_vm_allocator<T1>&, const SCP_vm_allocator<T2>&) throw()
 {
@@ -143,6 +137,7 @@
 #define SCP_queue std::queue
 #define SCP_vector std::vector
 #define SCP_list std::list
+#define SCP_unordered_map std::unordered_map
 
 #endif
 
Index: code/graphics/grbatch.cpp
===================================================================
--- code/graphics/grbatch.cpp	(revision 9388)
+++ code/graphics/grbatch.cpp	(working copy)
@@ -591,8 +591,8 @@
 	bool laser;
 };
 
-static SCP_vector<batch_item> geometry_map;
-static SCP_vector<batch_item> distortion_map;
+static SCP_unordered_map<int, batch_item> geometry_map;
+static SCP_unordered_map<int, batch_item> distortion_map;
 
 // Used for sending verts to the vertex buffer
 effect_vertex *Batch_buffer = NULL;
@@ -600,40 +600,18 @@
 
 static size_t find_good_batch_item(int texture)
 {
-	size_t max_size = geometry_map.size();
-
-	for (size_t i = 0; i < max_size; i++) {
-		if (geometry_map[i].texture == texture)
-			return i;
+	{	// operator[] default-constructs a batch_item on first access; assign unconditionally instead of comparing its possibly-uninitialized texture member
+		geometry_map[texture].texture = texture;
 	}
-
-	// don't have an existing match so add a new entry
-	batch_item new_item;
-
-	new_item.texture = texture;
-
-	geometry_map.push_back(new_item);
-
-	return (geometry_map.size() - 1);
+	return texture;
 }
 
 static size_t find_good_distortion_item(int texture)
 {
-	size_t max_size = distortion_map.size();
-
-	for (size_t i = 0; i < max_size; i++) {
-		if (distortion_map[i].texture == texture)
-			return i;
+	{	// operator[] default-constructs a batch_item on first access; assign unconditionally instead of comparing its possibly-uninitialized texture member
+		distortion_map[texture].texture = texture;
 	}
-
-	// don't have an existing match so add a new entry
-	batch_item new_item;
-
-	new_item.texture = texture;
-
-	distortion_map.push_back(new_item);
-
-	return (distortion_map.size() - 1);
+	return texture;
 }
 
 float batch_add_laser(int texture, vec3d *p0, float width1, vec3d *p1, float width2, int r, int g, int b)
@@ -728,71 +706,71 @@
 
 void batch_render_lasers(bool stream_buffer)
 {
-	for (SCP_vector<batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
+	for (SCP_unordered_map<int, batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
 
-		if ( !bi->laser )
+		if ( !bi->second.laser )
 			continue;
 
-		if ( !bi->batch.need_to_render() )
+		if ( !bi->second.batch.need_to_render() )
 			continue;
 
-		Assert( bi->texture >= 0 );
-		gr_set_bitmap(bi->texture, GR_ALPHABLEND_FILTER, GR_BITBLT_MODE_NORMAL, 0.99999f);
+		Assert( bi->second.texture >= 0 );
+		gr_set_bitmap(bi->second.texture, GR_ALPHABLEND_FILTER, GR_BITBLT_MODE_NORMAL, 0.99999f);
 		if ( stream_buffer ) {
-			bi->batch.render_buffer(TMAP_FLAG_TEXTURED | TMAP_FLAG_XPARENT | TMAP_HTL_3D_UNLIT | TMAP_FLAG_RGB | TMAP_FLAG_GOURAUD | TMAP_FLAG_CORRECT);
+			bi->second.batch.render_buffer(TMAP_FLAG_TEXTURED | TMAP_FLAG_XPARENT | TMAP_HTL_3D_UNLIT | TMAP_FLAG_RGB | TMAP_FLAG_GOURAUD | TMAP_FLAG_CORRECT);
 		} else {
-			bi->batch.render(TMAP_FLAG_TEXTURED | TMAP_FLAG_XPARENT | TMAP_HTL_3D_UNLIT | TMAP_FLAG_RGB | TMAP_FLAG_GOURAUD | TMAP_FLAG_CORRECT);
+			bi->second.batch.render(TMAP_FLAG_TEXTURED | TMAP_FLAG_XPARENT | TMAP_HTL_3D_UNLIT | TMAP_FLAG_RGB | TMAP_FLAG_GOURAUD | TMAP_FLAG_CORRECT);
 		}
 	}
 }
 
 void batch_load_buffer_lasers(effect_vertex* buffer, int *n_verts)
 {
-	for (SCP_vector<batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
+	for (SCP_unordered_map<int, batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
 
-		if ( !bi->laser )
+		if ( !bi->second.laser )
 			continue;
 
-		if ( !bi->batch.need_to_render() )
+		if ( !bi->second.batch.need_to_render() )
 			continue;
 
-		Assert( bi->texture >= 0 );
-		bi->batch.load_buffer(buffer, n_verts);
+		Assert( bi->second.texture >= 0 );
+		bi->second.batch.load_buffer(buffer, n_verts);
 	}
 }
 
 void batch_render_geometry_map_bitmaps(bool stream_buffer)
 {
-	for (SCP_vector<batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
+	for (SCP_unordered_map<int, batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
 
-		if ( bi->laser )
+		if ( bi->second.laser )
 			continue;
 
-		if ( !bi->batch.need_to_render() )
+		if ( !bi->second.batch.need_to_render() )
 			continue;
 
-		Assert( bi->texture >= 0 );
-		gr_set_bitmap(bi->texture, GR_ALPHABLEND_FILTER, GR_BITBLT_MODE_NORMAL, bi->alpha);
+		Assert( bi->second.texture >= 0 );
+		gr_set_bitmap(bi->second.texture, GR_ALPHABLEND_FILTER, GR_BITBLT_MODE_NORMAL, bi->second.alpha);
 		if ( stream_buffer ) {
-			bi->batch.render_buffer(bi->tmap_flags);
+			bi->second.batch.render_buffer(bi->second.tmap_flags);
 		} else {
-			bi->batch.render( bi->tmap_flags);
+			bi->second.batch.render( bi->second.tmap_flags);
 		}
 	}
 }
 
 void batch_load_buffer_geometry_map_bitmaps(effect_vertex* buffer, int *n_verts)
 {
-	for (SCP_vector<batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
+	for (SCP_unordered_map<int, batch_item>::iterator bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
 
-		if ( bi->laser )
+		if ( bi->second.laser )
 			continue;
 
-		if ( !bi->batch.need_to_render() )
+		if ( !bi->second.batch.need_to_render() )
 			continue;
 
-		Assert( bi->texture >= 0 );
-		bi->batch.load_buffer(buffer, n_verts);
+		Assert( bi->second.texture >= 0 );
+		bi->second.batch.load_buffer(buffer, n_verts);
 	}
 }
 
@@ -898,54 +876,54 @@
 
 void batch_render_distortion_map_bitmaps(bool stream_buffer)
 {
-	for (SCP_vector<batch_item>::iterator bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {
+	for (SCP_unordered_map<int, batch_item>::iterator bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {
 
-		if ( bi->laser )
+		if ( bi->second.laser )
 			continue;
 
-		if ( !bi->batch.need_to_render() )
+		if ( !bi->second.batch.need_to_render() )
 			continue;
 
-		Assert( bi->texture >= 0 );
-		gr_set_bitmap(bi->texture, GR_ALPHABLEND_NONE, GR_BITBLT_MODE_NORMAL, bi->alpha);
+		Assert( bi->second.texture >= 0 );
+		gr_set_bitmap(bi->second.texture, GR_ALPHABLEND_NONE, GR_BITBLT_MODE_NORMAL, bi->second.alpha);
 
 		if ( stream_buffer ) {
-			bi->batch.render_buffer(bi->tmap_flags);
+			bi->second.batch.render_buffer(bi->second.tmap_flags);
 		} else {
-			bi->batch.render( bi->tmap_flags);
+			bi->second.batch.render( bi->second.tmap_flags);
 		}
 	}
 }
 
 void batch_load_buffer_distortion_map_bitmaps(effect_vertex* buffer, int *n_verts)
 {
-	for (SCP_vector<batch_item>::iterator bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {
+	for (SCP_unordered_map<int, batch_item>::iterator bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {
 
-		if ( bi->laser )
+		if ( bi->second.laser )
 			continue;
 
-		if ( !bi->batch.need_to_render() )
+		if ( !bi->second.batch.need_to_render() )
 			continue;
 
-		Assert( bi->texture >= 0 );
-		bi->batch.load_buffer(buffer, n_verts);
+		Assert( bi->second.texture >= 0 );
+		bi->second.batch.load_buffer(buffer, n_verts);
 	}
 }
 
 int batch_get_size()
 {
 	int n_to_render = 0;
-	SCP_vector<batch_item>::iterator bi;
+	SCP_unordered_map<int, batch_item>::iterator bi;
 
 	for (bi = geometry_map.begin(); bi != geometry_map.end(); ++bi) {
-		n_to_render += bi->batch.need_to_render();
+		n_to_render += bi->second.batch.need_to_render();
 	}
 
 	for (bi = distortion_map.begin(); bi != distortion_map.end(); ++bi) {
-		if ( bi->laser )
+		if ( bi->second.laser )
 			continue;
 
-		n_to_render += bi->batch.need_to_render();
+		n_to_render += bi->second.batch.need_to_render();
 	}
 
 	return n_to_render * 3;
Index: code/object/objcollide.cpp
===================================================================
--- code/object/objcollide.cpp	(revision 9388)
+++ code/object/objcollide.cpp	(working copy)
@@ -42,7 +42,7 @@
 obj_pair pair_free_list;
 
 SCP_vector<int> Collision_sort_list;
-SCP_hash_map<uint, collider_pair> Collision_cached_pairs;
+SCP_unordered_map<uint, collider_pair> Collision_cached_pairs;
 
 struct checkobject;
 extern checkobject CheckObjects[MAX_OBJECTS];
@@ -1039,7 +1039,7 @@
 			opp = opp->next;
 		}
 	} else {
-		SCP_hash_map<uint, collider_pair>::iterator it;
+		SCP_unordered_map<uint, collider_pair>::iterator it;
 		collider_pair *pair_obj;
 
 		for ( it = Collision_cached_pairs.begin(); it != Collision_cached_pairs.end(); ++it ) {
@@ -1180,7 +1180,7 @@
 
 void obj_collide_retime_cached_pairs(int checkdly)
 {
-	SCP_hash_map<uint, collider_pair>::iterator it;
+	SCP_unordered_map<uint, collider_pair>::iterator it;
 
 	for ( it = Collision_cached_pairs.begin(); it != Collision_cached_pairs.end(); ++it ) {
 		it->second.next_check_time = timestamp(checkdly);
