• Normal mapping without precomputed tangent space


    Here is the shader first; a short note on the underlying math follows the code.

    // vertex shader
    
    varying vec3 Vertex_UV;
    varying vec3 Vertex_Normal;
    varying vec3 Vertex_LightDir;
    varying vec3 Vertex_EyeVec;
    
    void main(void)
    {
    
        gl_Position      =   gl_ModelViewProjectionMatrix * gl_Vertex;
        Vertex_UV        =   gl_MultiTexCoord0.xyz ;
        Vertex_Normal    =   gl_NormalMatrix*gl_Normal;
        vec4 view_vertex =   gl_ModelViewMatrix*gl_Vertex;
        Vertex_LightDir  =   (gl_LightSource[0].position -  view_vertex).xyz;
        Vertex_EyeVec    =   (-view_vertex).xyz ;
        
    }
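
    // fragment shader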
    //uniform sampler2D tex0; // color map
    uniform sampler2D normalMap; // normal map
    
    varying vec3 Vertex_UV;
    varying vec3 Vertex_Normal;
    varying vec3 Vertex_LightDir;
    varying vec3 Vertex_EyeVec;
    
    mat3 cotangent_frame(vec3 N, vec3 p, vec2 uv)
    {
        // get edge vectors of the pixel triangle
        vec3 dp1 = dFdx( p );
        vec3 dp2 = dFdy( p );
        vec2 duv1 = dFdx( uv );
        vec2 duv2 = dFdy( uv );
     
        // solve the linear system
        vec3 dp2perp = cross( dp2, N );
        vec3 dp1perp = cross( N, dp1 );
        vec3 T = dp2perp * duv1.x + dp1perp * duv2.x;
        vec3 B = dp2perp * duv1.y + dp1perp * duv2.y;
     
        // construct a scale-invariant frame 
        float invmax = inversesqrt( max( dot(T,T), dot(B,B) ) );
        return mat3( T * invmax, B * invmax, N );
    }
    
    vec3 perturb_normal( vec3 N, vec3 V, vec2 texcoord )
    {
        // assume N, the interpolated vertex normal and 
        // V, the view vector (vertex to eye)
       // decode the RGB normal map: maps texel values [0,255] to [-1,1], with 128 -> 0
       vec3 map = texture2D(normalMap, texcoord).xyz;
       map = map * 255./127. - 128./127.;
       mat3 TBN = cotangent_frame(N, -V, texcoord);
       return normalize(TBN * map);
    }
    
    void main(void){
    
      vec2 uv = Vertex_UV.xy;
      
      vec3 N = normalize(Vertex_Normal);
      vec3 L = normalize(Vertex_LightDir);
      vec3 V = normalize(Vertex_EyeVec);
      vec3 PN = perturb_normal(N, V, uv);
    
      //float lambertTerm = dot(PN, L);
      vec4 intensity = vec4(0.0, 0.0, 0.0, 0.0); // final accumulated color
    
    
      vec3 hDir;
      float NdotL, NdotHV;
    
     
      hDir  = normalize(V + L) ;
      NdotL  = max(dot(PN, L),    0.0);
      NdotHV = max(dot(PN, hDir), 0.0);
    
      intensity+= gl_LightSource[0].ambient * 0.5 ;
      intensity+= gl_LightSource[0].diffuse * NdotL * 0.3 ;
    
      if (NdotL > 0.0)
            intensity += gl_LightSource[0].specular * NdotL * pow(NdotHV, 30.0);
    
      gl_FragColor = intensity;
    
    }
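
    The principle behind cotangent_frame: inside a triangle, both the position p and the texture coordinates (u, v) vary linearly, so the per-pixel screen-space derivatives taken by dFdx/dFdy are tied together by the tangent basis we want. Writing dp1 = dFdx(p), dp2 = dFdy(p), and likewise du1, du2, dv1, dv2 for the uv derivatives, the surface gradients of u and v satisfy

    $$ \nabla u \cdot dp_1 = du_1, \quad \nabla u \cdot dp_2 = du_2, \qquad \nabla v \cdot dp_1 = dv_1, \quad \nabla v \cdot dp_2 = dv_2. $$

    Because dp2perp = dp_2 × N and dp1perp = N × dp_1 are each orthogonal to N and to one of the edge vectors, and both give the same scalar k = N · (dp_1 × dp_2) against the other edge vector, they form (up to the common factor k) the dual basis of {dp_1, dp_2} in the tangent plane. Solving the system above then gives exactly the two lines in the code:

    $$ T = k\,\nabla u = \mathrm{dp2perp}\;du_1 + \mathrm{dp1perp}\;du_2, \qquad B = k\,\nabla v = \mathrm{dp2perp}\;dv_1 + \mathrm{dp1perp}\;dv_2. $$

    The leftover common factor k is what the inversesqrt(max(dot(T,T), dot(B,B))) line normalizes away, which is why no explicit division by a determinant is needed; the resulting {T, B, N} is the cotangent frame that rotates the decoded normal-map vector into view space.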

    Recently, while working on subsurface scattering, I found that normal mapping simply would not show up: SSS is essentially a smoothing pass over the surface, so any detail you add gets smoothed away with it.

    My workaround was to apply the perturbed-normal lighting directly on top of the final SSS result. The catch is that the normal map then has to be amplified, because detail layered onto the final image reads much weaker than detail applied before the SSS pass; a sketch of this idea follows.
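
    A minimal sketch of that idea, assuming a texture-space SSS pipeline where the blurred irradiance has already been resolved into a texture. The sssResult name, the detailStrength uniform, and the blend itself are my own illustration, not code from the original pipeline; perturb_normal and cotangent_frame are the same helpers as above.

    // fragment shader sketch (hypothetical): layer normal-map detail over a resolved SSS image
    uniform sampler2D sssResult;    // assumed: blurred texture-space SSS irradiance
    uniform sampler2D normalMap;
    uniform float detailStrength;   // assumed: > 1.0, amplifies the otherwise weak detail

    varying vec3 Vertex_UV;
    varying vec3 Vertex_Normal;
    varying vec3 Vertex_LightDir;
    varying vec3 Vertex_EyeVec;

    // perturb_normal / cotangent_frame: same definitions as in the shader above

    void main(void)
    {
        vec2 uv = Vertex_UV.xy;
        vec3 N  = normalize(Vertex_Normal);
        vec3 L  = normalize(Vertex_LightDir);
        vec3 V  = normalize(Vertex_EyeVec);
        vec3 PN = perturb_normal(N, V, uv);

        // detail term: how much the perturbed normal brightens or darkens
        // the pixel relative to the smooth shading already baked into the SSS result
        float detail = dot(PN, L) - dot(N, L);

        vec4 sss = texture2D(sssResult, uv);
        gl_FragColor = sss * (1.0 + detail * detailStrength);
    }

    Multiplying rather than adding keeps the detail proportional to the underlying SSS shading, so dark regions do not wash out; the amplification mentioned above then lives entirely in detailStrength.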

  • Original post: https://www.cnblogs.com/graph/p/3462091.html