void Reduce16to15(short *pScreen)
{
	unsigned long sbatch=(unsigned long)(scene.viewportX*scene.viewportY)/2;

	__asm
	{
					mov esi,sbatch
					mov edi,pScreen

_565to555:			mov eax,[edi]
					add edi,4

					mov	ebx,eax
					and	eax,11111111110000001111111111000000b
					and	ebx,00000000000111110000000000011111b
					shr	eax,1
					or	ebx,eax

					mov [edi-4],ebx
					dec esi
					jne short _565to555
	}
}



void scen::transformEntityToCamera( long n )
{
	// Transform entity n's geometry from object space to world space,
	// appending the resulting faces/points to bigFace[]/bigPoint[]
	// (advancing the global nFace/nPoint cursors).
	float *matrix = entity[n].matrix_ObjectWorld;

	if( entity[n].type == 0 )	// mesh object
	{
		float *shape = aObject[n].shape3D;

		// Per-vertex cache of the already-transformed point, so a vertex
		// shared by several faces is transformed only once.
		struct daPoint **where = new struct daPoint*[aObject[n].nbp];
		for(long i=0;i<aObject[n].nbp;i++) where[i] = NULL;

		struct daPoint *plusVite;	// "faster": cached transformed vertex

		USHORT *r = aObject[n].rel;
		// BUGFIX: 'i' was not declared in this loop; the original relied on
		// the pre-standard MSVC for-scope leak from the loop above.
		for(long i=0;i<aObject[n].nbf;i++)
		{ 
			long ngone = bigFace[nFace].ngone = *r++;	// vertex count of this face
			bigFace[nFace].nsurf = aObject[n].IDF[i];
			bigFace[nFace++].dataPoints = &bigPoint[nPoint];
			for(long j=0;j<ngone;j++)
			{
				if( !(plusVite = where[r[j]/3]) ) 
				{
					// First encounter of this vertex: transform it.
					float tx = shape[r[j]];
					float ty = shape[r[j]+1];
					float tz = shape[r[j]+2];

					plusVite = &bigPoint[nPoint];
					// BUGFIX: record the transformed point in the cache; the
					// original never stored it, so the cheap copy branch
					// below was unreachable and every vertex was re-transformed.
					where[r[j]/3] = plusVite;
					bigPoint[nPoint].x = (tx*matrix[0]) + (ty*matrix[4]) + (tz*matrix[8]) + (matrix[12]);
					bigPoint[nPoint].y = (tx*matrix[1]) + (ty*matrix[5]) + (tz*matrix[9]) + (matrix[13]);
					bigPoint[nPoint++].z = (tx*matrix[2]) + (ty*matrix[6]) + (tz*matrix[10]) + (matrix[14]);
				}
				else
				{
					// Vertex already transformed for an earlier face: copy it.
					bigPoint[nPoint].x	 = plusVite->x;
					bigPoint[nPoint].y	 = plusVite->y;
					bigPoint[nPoint++].z = plusVite->z;
				}
			}
			r += ngone;	// skip over this face's index list
		}
		delete[] where;	// BUGFIX: array form; plain 'delete' on new[] is UB
	}
	else if( entity[n].type == 3 )	// null object
	{
		// A null object is a single point: write the matrix translation
		// straight into its shape data.
		float *shape = aObject[n].shape3D;
		shape[0] = matrix[12];
		shape[1] = matrix[13];
		shape[2] = matrix[14];
	}
	else if( entity[n].type == 1 )	// light — see it later...
	{
		// TODO: empty placeholder for distant (type 0) and spot (type 2)
		// lights. The original repeated this identical empty test twice;
		// the duplicate was removed.
		if( (aLight[n-light_at].type == 0) || (aLight[n-light_at].type == 2) )
		{
		}
	}
}










//	----------- Face normal culling — WORKS!!




				float vO[3], vP[3];
				// Build the viewing vector from the camera position
				// (world space -> object space) and a vertex
				// (object space) of the face.
				vO[0] = camG[12];
				vO[1] = camG[13];
				vO[2] = camG[14];
//				iVector(vO, entity[n].matrix_ObjectWorld);// transpose to obj space

				float facevect1x = tshp[br[1]] - tshp[br[0]];	// take first 3 pts
				float facevect1y = tshp[br[1]+1] - tshp[br[0]+1];  // to form the normal
				float facevect1z = tshp[br[1]+2] - tshp[br[0]+2];


				float facevect2x = tshp[br[2]]- tshp[br[0]];
				float facevect2y = tshp[br[2]+1] - tshp[br[0]+1];
				float facevect2z = tshp[br[2]+2] - tshp[br[0]+2];

				// build normal vector A, B, C plane equation
				fn[0] = (facevect1y*facevect2z)-(facevect1z*facevect2y);
				fn[1] = (facevect1z*facevect2x)-(facevect1x*facevect2z);
				fn[2] = (facevect1x*facevect2y)-(facevect1y*facevect2x);
					

				// get the normalization factor (length of the face normal)
				float nz = dPoint(fn);

				vP[0]= tshp[br[0]];
				vP[1]= tshp[br[0]+1];
				vP[2]= tshp[br[0]+2];
				// calculate Ax1 + By1 + Cz1
				float dst = dot(vP,fn);

				fn[0] /= nz;
				fn[1] /= nz;
				fn[2] /= nz;

/*
//				this transpose a point in world space to a point in object space
//				using a inverse matrix of camera and object transformations
				tPoint(vO, iCamG);
				vO[0] -= entity[n].currentFrame[0];
				vO[1] += entity[n].currentFrame[1];
				vO[2] -= entity[n].currentFrame[2];
				tPoint(vO, entity[n].matrix_IObject);
*/
				// calculate dot product
//				bigFace[nFace].nx = fn[0];
//				bigFace[nFace].ny = fn[1];
//				bigFace[nFace].nz = fn[2];
				xp = dot(vO,fn);
				xp += dst;
				fn+=3;
