Alright! I got great news!
As some of you may know, it's impossible to install the xNormals plugin on a portable version of Photoshop CS3. And if you're like me, you are often on the go and can't really afford to get a laptop.
Last week, I wrote my own cavity map java program so that I can texture when away from my trusty computer.
It's a .JAR executable, so you'll need the latest version of Java to run it:
http://www.megaupload.com/?d=TJ7DN4JL
Also, I've posted the essential source code of the function that does the actual cavity map.
Keep in mind, I'm pretty new to programming, so the code might be sloppy or inefficient.
/**
 * Converts the normal map held in the instance field {@code renderedImage}
 * into a grey-scale cavity map, stores the result back into
 * {@code renderedImage} (so it can be saved), and repaints the display.
 *
 * <p>For every pixel, the 8 surrounding neighbours are sampled; a neighbour
 * that falls outside the image is replaced by the centre pixel itself (the
 * behaviour the original edge branches intended). Opposite neighbour pairs
 * are differenced per channel, the four absolute differences are averaged,
 * and the three channel averages are averaged again into one grey value.
 * Integer division is kept, matching the original precision.
 *
 * <p>Fixes over the previous version:
 * <ul>
 *   <li>the right/bottom edge tests compared {@code i >= getWidth()} and
 *       {@code j >= getHeight()}, which never trigger inside the loops, so
 *       {@code getRGB(i+1, ...)} threw on the last column/row and those
 *       pixels were silently skipped by the catch block;</li>
 *   <li>the result was written to {@code (i+1, j+1)} instead of
 *       {@code (i, j)}, shifting the output and overflowing on the last
 *       column/row.</li>
 * </ul>
 * With correct bounds every output pixel is written, so the white pre-fill,
 * the try/catch, and the debug counter are no longer needed.
 */
private void cavityKernel() {
    final int width = renderedImage.getWidth();
    final int height = renderedImage.getHeight();
    final BufferedImage cavityMap =
            new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB);

    // Neighbour offsets ordered so that index k and index 7-k are opposite
    // neighbours (NW/SE, N/S, NE/SW, W/E) -- the pairing the averaging below
    // relies on.
    final int[] dx = {-1, 0, 1, -1, 1, -1, 0, 1};
    final int[] dy = {-1, -1, -1, 0, 0, 1, 1, 1};

    final int[] red = new int[8];
    final int[] green = new int[8];
    final int[] blue = new int[8];

    for (int i = 0; i < width; i++) {
        for (int j = 0; j < height; j++) {
            for (int k = 0; k < 8; k++) {
                int x = i + dx[k];
                int y = j + dy[k];
                if (x < 0 || x >= width || y < 0 || y >= height) {
                    // Off the edge: fall back to sampling the centre pixel,
                    // as the original boundary branches did.
                    x = i;
                    y = j;
                }
                final int argb = renderedImage.getRGB(x, y);
                red[k] = (argb >>> 16) & 0xFF;
                green[k] = (argb >>> 8) & 0xFF;
                blue[k] = argb & 0xFF;
            }

            // Average the absolute differences of the four opposite-neighbour
            // pairs per channel, then average the channels into one grey value.
            final int r = (Math.abs(red[0] - red[7]) + Math.abs(red[1] - red[6])
                    + Math.abs(red[2] - red[5]) + Math.abs(red[3] - red[4])) / 4;
            final int g = (Math.abs(green[0] - green[7]) + Math.abs(green[1] - green[6])
                    + Math.abs(green[2] - green[5]) + Math.abs(green[3] - green[4])) / 4;
            final int b = (Math.abs(blue[0] - blue[7]) + Math.abs(blue[1] - blue[6])
                    + Math.abs(blue[2] - blue[5]) + Math.abs(blue[3] - blue[4])) / 4;
            final int grey = (r + g + b) / 3;

            cavityMap.setRGB(i, j, (grey << 16) | (grey << 8) | grey);
        }
    }

    renderedImage = cavityMap; // replace so the result can be saved
    displayBufferedImage(renderedImage); // repaint the display image
}
Replies
Input Image:
Output Image:
The boundary pixels are still giving me issues, i'll re-post a JAR link to replace the old one.
An Ambient Occlusion map is a map which can be black and white (but doesn't have to be; you will often hear environment artists chastising people for making their AO pitch black in an environment). The darker parts of an AO map basically represent areas where the sun doesn't shine, while the lighter ones represent places that get lots of light.
In most cases cracks and wrinkles don't get a lot of light so these two maps look very similar.
There are differences between the way each will look, but the main difference between them is how they are calculated.
Ambient Occlusion can only be calculated in 3-D, and is done by casting a bunch of rays from every point on a mesh. I could write a description but this link does a better job (with pictures and everything). One thing to note is you can AO map anything regardless of whether you have a high poly for it.
A cavity map measures the difference between a high poly mesh and a low poly mesh, much in the same way a displacement map does (exactly the same I believe). This can be done two ways -
1. In a 3D application a bunch of rays are cast from a low poly mesh to a high poly mesh, and for each point the distance between the two is measured. These distance values are rendered to a texture in the form of a black and white image.
2. You can generate this same type of map from a normal map (what is going on in this thread). A normal map uses colors (red green blue) to represent the change in normal direction between a low and high polygon mesh. This is enough information for you to process this map in 2-D into a new black and white image which instead represents depth change.
All this being said the program in this thread is not really doing this to the image, so if you want a visual example of this you may want to look elsewhere for the time being.
However, I think I've figured out a solution, though it would require the user to change the background color of his normal maps. By having the user make the baked normal map background be something that a normal map would never be able to reach (rgb: 001, 010, or 100) then the program could detect the impossible value and simply keep it out of the calculation.
I'll update once it's done.