Spring 1999

Purpose:
To gain experience implementing texture mapping and anti-aliasing techniques.
Texture maps read from a file are specified with a line of the form (shown here for normal_type; the trailing 0/1 is the repetition flag):
normal_type: 8 filename u_res v_res 0/1
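For example (the file name and resolutions here are made up):

normal_type: 8 brick.img 128 128 1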
typedef struct color_text_td
{
   int     type;             /* texture type (8 = texture read from a file) */
   char    *name;            /* texture file name                           */
   rgb_td  ***texture;       /* texel data (2-D array of rgb_td pointers)   */
   int     u_size, v_size;   /* texture resolution (u_res, v_res)           */
   int     repetition;       /* 0/1 repetition flag from the spec line      */
} color_text_td;
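For reference, the my_color_texture() routine you write (called in the snippet further below) could be a simple bilinear lookup along these lines. This is only a sketch: the signature is a guess, it assumes (u,v) arrive in [0,1], that texture[i][j] points at a single texel, and that rgb_td has r, g, b fields.

#include <math.h>   /* for floor() */

rgb_td my_color_texture(color_text_td *tex, double u, double v)
{
   rgb_td c;
   double x, y, fx, fy;
   int    i0, j0, i1, j1;

   if (tex->repetition)
   {  /* tile: keep only the fractional part of u and v */
      u = u - floor(u);
      v = v - floor(v);
   }
   else
   {  /* clamp to the edges of the map */
      if (u < 0.0) u = 0.0;  if (u > 1.0) u = 1.0;
      if (v < 0.0) v = 0.0;  if (v > 1.0) v = 1.0;
   }

   x = u * (tex->u_size - 1);
   y = v * (tex->v_size - 1);
   i0 = (int)x;   j0 = (int)y;
   i1 = (i0 + 1 < tex->u_size) ? i0 + 1 : i0;
   j1 = (j0 + 1 < tex->v_size) ? j0 + 1 : j0;
   fx = x - i0;   fy = y - j0;

   /* bilinear blend of the four surrounding texels */
   c.r = (1-fx)*(1-fy)*tex->texture[i0][j0]->r + fx*(1-fy)*tex->texture[i1][j0]->r
       + (1-fx)*fy*tex->texture[i0][j1]->r     + fx*fy*tex->texture[i1][j1]->r;
   c.g = (1-fx)*(1-fy)*tex->texture[i0][j0]->g + fx*(1-fy)*tex->texture[i1][j0]->g
       + (1-fx)*fy*tex->texture[i0][j1]->g     + fx*fy*tex->texture[i1][j1]->g;
   c.b = (1-fx)*(1-fy)*tex->texture[i0][j0]->b + fx*(1-fy)*tex->texture[i1][j0]->b
       + (1-fx)*fy*tex->texture[i0][j1]->b     + fx*fy*tex->texture[i1][j1]->b;
   return c;
}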
if (illum.intensity_type == MAP || illum.intensity_type == PROC ||
    l_interp == 1               || illum.color_type == PROC     ||
    illum.color_type == MAP     || illum.normal_type == PROC    ||
    illum.normal_type == MAP    || illum.env_type != CONST      ||
    illum.transp_type != CONST)
{
   x_inv_map((int)x, &xt, width);
   pnt_img.x = xt;
   pnt_img.y = y_coord;
   pnt_img.z = z;
   transform_pts(1, inv_tm, &pnt_img, &pnt_objw);
   project_pts(1, &pnt_objw, &pnt_obj);

   if (illum.color_type == MAP)
   {
      /* for mip-mapping, map back pixel corners here to get
         their location in object space */

      /* stick your code for color texture mapping here */
      illum.color[face] = my_color_texture(......);
   }
   /************* get normal vector *******************/
   if (illum.normal_type == MAP)
   {
      pert_norm = bump_map(world_sp[face], norm_p, ......);
   }
   else
      pert_norm = norm_p;
You need to map the pixel corners back for mip-mapping. The code above shows how to get the image-space location from the screen coordinate x.
/* calc the image-space x distance between 2 pixels */
x_inv_map(10, &xl, width);
x_inv_map(11, &xt, width);
d_pix_x = xl - xt;
For y, add and subtract 1/2 scanline. The image-space delta_y_image for 1/2 scanline is the following:

numsl = (view_p.vt - view_p.vb)/2 * Y_RES;
delta_y_image = (2.0 * (0.5) / (double)(numsl-1));
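Putting the pieces together, the four pixel corners can be mapped back exactly the way the pixel center was mapped in the snippet near the top. This is only a sketch: x_img is assumed to hold the current pixel's image-space x (from x_inv_map), and corner_img/corner_w/corner_obj are hypothetical arrays declared with the same point types as pnt_img, pnt_objw, and pnt_obj.

/* image-space corners of the pixel at (x_img, y_coord):
   +/- half a pixel in x, +/- half a scanline in y */
corner_img[0].x = x_img - 0.5*d_pix_x;   corner_img[0].y = y_coord - delta_y_image;
corner_img[1].x = x_img + 0.5*d_pix_x;   corner_img[1].y = y_coord - delta_y_image;
corner_img[2].x = x_img + 0.5*d_pix_x;   corner_img[2].y = y_coord + delta_y_image;
corner_img[3].x = x_img - 0.5*d_pix_x;   corner_img[3].y = y_coord + delta_y_image;
for (i = 0; i < 4; i++)
   corner_img[i].z = z;

/* back through the inverse transform and the projection, just as for
   the pixel center */
transform_pts(4, inv_tm, corner_img, corner_w);
project_pts(4, corner_w, corner_obj);

/* the (u,v) spread of corner_obj[0..3] over the texture is the pixel's
   footprint in texels; its largest extent selects the mip-map level */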
For bump mapping, here is example code to calculate Pu and Pv (the surface tangent vectors in the u and v directions):
/* interpolate at ansv along the wld_sp[0]-wld_sp[1] and wld_sp[3]-wld_sp[2]
   edges of the quad; Pu is the difference between the two points */
pv_0.x = wld_sp[0].x + ansv*(wld_sp[1].x - wld_sp[0].x);
pv_0.y = wld_sp[0].y + ansv*(wld_sp[1].y - wld_sp[0].y);
pv_0.z = wld_sp[0].z + ansv*(wld_sp[1].z - wld_sp[0].z);
pv_1.x = wld_sp[3].x + ansv*(wld_sp[2].x - wld_sp[3].x);
pv_1.y = wld_sp[3].y + ansv*(wld_sp[2].y - wld_sp[3].y);
pv_1.z = wld_sp[3].z + ansv*(wld_sp[2].z - wld_sp[3].z);
Pu.x = pv_1.x - pv_0.x;
Pu.y = pv_1.y - pv_0.y;
Pu.z = pv_1.z - pv_0.z;

/* interpolate at ansu along the wld_sp[0]-wld_sp[3] and wld_sp[1]-wld_sp[2]
   edges; Pv is the difference */
pu_0.x = wld_sp[0].x + ansu*(wld_sp[3].x - wld_sp[0].x);
pu_0.y = wld_sp[0].y + ansu*(wld_sp[3].y - wld_sp[0].y);
pu_0.z = wld_sp[0].z + ansu*(wld_sp[3].z - wld_sp[0].z);
pu_1.x = wld_sp[1].x + ansu*(wld_sp[2].x - wld_sp[1].x);
pu_1.y = wld_sp[1].y + ansu*(wld_sp[2].y - wld_sp[1].y);
pu_1.z = wld_sp[1].z + ansu*(wld_sp[2].z - wld_sp[1].z);
Pv.x = pu_1.x - pu_0.x;
Pv.y = pu_1.y - pu_0.y;
Pv.z = pu_1.z - pu_0.z;

/* project Pu and Pv into the tangent plane (remove their components
   along the surface normal) via double cross products */
cross_xyz(Pu, norm, &pv_temp);
cross_xyz(norm, pv_temp, &Pu);
cross_xyz(Pv, norm, &pv_temp);
cross_xyz(norm, pv_temp, &Pv);
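One way bump_map() might then use these tangents is sketched below: Bu and Bv are central differences of the bump (height) map around the hit point's (u,v); get_bump(), du, dv, bump_scale, and normalize_xyz() are hypothetical names, and the sign convention may need flipping to match your map.

/* central differences of the bump map in u and v
   (du and dv are one texel step in parameter space) */
Bu = (get_bump(tex, u + du, v) - get_bump(tex, u - du, v)) / (2.0 * du);
Bv = (get_bump(tex, u, v + dv) - get_bump(tex, u, v - dv)) / (2.0 * dv);

/* normalize Pu and Pv first so the bump strength does not depend on
   the polygon's size, then tilt the normal along them */
normalize_xyz(&Pu);
normalize_xyz(&Pv);
pert_norm.x = norm.x + bump_scale * (Bu*Pu.x + Bv*Pv.x);
pert_norm.y = norm.y + bump_scale * (Bu*Pu.y + Bv*Pv.y);
pert_norm.z = norm.z + bump_scale * (Bu*Pu.z + Bv*Pv.z);
normalize_xyz(&pert_norm);   /* renormalize before shading */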
How to get the object center:

The bounding box is specified as: bounding_box: xmax ymax zmax xmin ymin zmin
The program then computes a scale and a translate (both xyz_td) that bring the box
into the -1:1 space; they are stored in illum.scale_obj and illum.trans_obj.
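If you ever need to reproduce that mapping yourself, a sketch is below. It assumes a uniform scale (so the object is not distorted) and a hypothetical bb struct holding the six bounding_box values; the actual program may scale each axis independently.

#define MAX2(a,b) ((a) > (b) ? (a) : (b))

double dx = bb.xmax - bb.xmin;
double dy = bb.ymax - bb.ymin;
double dz = bb.zmax - bb.zmin;
double s  = 2.0 / MAX2(dx, MAX2(dy, dz));   /* largest extent -> length 2 */

illum.scale_obj.x = illum.scale_obj.y = illum.scale_obj.z = s;

/* translation that moves the box center to the origin (applied before
   the scale, or folded into it, depending on your transform order) */
illum.trans_obj.x = -0.5*(bb.xmax + bb.xmin);
illum.trans_obj.y = -0.5*(bb.ymax + bb.ymin);
illum.trans_obj.z = -0.5*(bb.zmax + bb.zmin);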
/* concatenate the object transform with the viewing transform */
cat_mat_4x4(tm[w], View, transf);

/* Apply the resulting transformation to the data points. */
transform_pts((long)world[w].plm_ptr->num_pnts, transf, objptr->pnts,
              temp_xyzw);
At this point, also create an object centroid (an xyz_td) by looping through
each point in the object and summing the x, y, and z values, something like:
obj_center[w].x = obj_center[w].y = obj_center[w].z = 0.0;
for (i = 0; i < world[w].plm_ptr->num_pnts; i++)
{
   obj_center[w].x += objptr->pnts[i].x;
   obj_center[w].y += objptr->pnts[i].y;
   obj_center[w].z += objptr->pnts[i].z;
}
obj_center[w].x /= world[w].plm_ptr->num_pnts;
obj_center[w].y /= world[w].plm_ptr->num_pnts;
obj_center[w].z /= world[w].plm_ptr->num_pnts;
Don't forget to malloc the obj_center array earlier in the code!
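For example (a sketch; num_objects stands for however many objects are in your world array):

#include <stdlib.h>   /* for malloc() */

xyz_td *obj_center;

obj_center = (xyz_td *) malloc(num_objects * sizeof(xyz_td));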