A Special Optimization for Heat-Distortion VFX in Unity (Pseudo) - 腾讯游戏学院
source link: http://gad.qq.com/article/detail/287950
/********************************************************************
 FileName: DistortEffect.cs
 Description: Screen distortion effect
 Created: 2017/04/27
 by: puppet_master
*********************************************************************/
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class DistortEffect : PostEffectBase
{
    // Time factor of the distortion
    [Range(0.0f, 1.0f)]
    public float DistortTimeFactor = 0.15f;
    // Strength of the distortion
    [Range(0.0f, 0.2f)]
    public float DistortStrength = 0.01f;
    // Noise texture
    public Texture NoiseTexture = null;
    // Shader used to render the mask texture
    public Shader maskObjShader = null;
    // Downsampling factor
    public int downSample = 4;

    private Camera mainCam = null;
    private Camera additionalCam = null;
    private RenderTexture renderTexture = null;

    public void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (_Material)
        {
            _Material.SetTexture("_NoiseTex", NoiseTexture);
            _Material.SetFloat("_DistortTimeFactor", DistortTimeFactor);
            _Material.SetFloat("_DistortStrength", DistortStrength);
            _Material.SetTexture("_MaskTex", renderTexture);
            Graphics.Blit(source, destination, _Material);
        }
        else
        {
            Graphics.Blit(source, destination);
        }
    }

    void Awake()
    {
        // Create an additional camera that matches the current camera
        InitAdditionalCam();
    }

    private void InitAdditionalCam()
    {
        mainCam = GetComponent<Camera>();
        if (mainCam == null)
            return;

        Transform addCamTransform = transform.Find("additionalDistortCam");
        if (addCamTransform != null)
            DestroyImmediate(addCamTransform.gameObject);

        GameObject additionalCamObj = new GameObject("additionalDistortCam");
        additionalCam = additionalCamObj.AddComponent<Camera>();
        SetAdditionalCam();
    }

    private void SetAdditionalCam()
    {
        if (additionalCam)
        {
            additionalCam.transform.parent = mainCam.transform;
            additionalCam.transform.localPosition = Vector3.zero;
            additionalCam.transform.localRotation = Quaternion.identity;
            additionalCam.transform.localScale = Vector3.one;

            additionalCam.farClipPlane = mainCam.farClipPlane;
            additionalCam.nearClipPlane = mainCam.nearClipPlane;
            additionalCam.fieldOfView = mainCam.fieldOfView;
            additionalCam.backgroundColor = Color.clear;
            additionalCam.clearFlags = CameraClearFlags.Color;
            additionalCam.cullingMask = 1 << LayerMask.NameToLayer("Distort");
            additionalCam.depth = -999;

            // The mask can be rendered at a lower resolution
            if (renderTexture == null)
                renderTexture = RenderTexture.GetTemporary(Screen.width >> downSample, Screen.height >> downSample, 0);
        }
    }

    void OnEnable()
    {
        SetAdditionalCam();
        additionalCam.enabled = true;
    }

    void OnDisable()
    {
        additionalCam.enabled = false;
    }

    void OnDestroy()
    {
        if (renderTexture)
        {
            RenderTexture.ReleaseTemporary(renderTexture);
        }
        DestroyImmediate(additionalCam.gameObject);
    }

    // Callback before the camera actually renders; render the mask texture here
    void OnPreRender()
    {
        // Render the mask objects with maskObjShader
        if (additionalCam.enabled)
        {
            additionalCam.targetTexture = renderTexture;
            additionalCam.RenderWithShader(maskObjShader, "");
        }
    }
}
//by:puppet_master
//2017.5.3
Shader "ApcShader/MaskObjPrepass"
{
    //SubShader
    SubShader
    {
        Pass
        {
            Cull Off
            CGPROGRAM
            #include "UnityCG.cginc"

            struct v2f
            {
                float4 pos : SV_POSITION;
            };

            v2f vert(appdata_full v)
            {
                v2f o;
                o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
                return o;
            }

            fixed4 frag(v2f i) : SV_Target
            {
                // This pass simply outputs a solid color
                return fixed4(1, 1, 1, 1);
            }

            // Use the vert and frag functions
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
}
//Full-screen distortion shader
//by:puppet_master
//2017.5.3
Shader "Custom/DistortPostEffect"
{
    Properties
    {
        _MainTex("Base (RGB)", 2D) = "white" {}
        _NoiseTex("Noise", 2D) = "black" {}    // Defaults to black, i.e. no offset
        _MaskTex("Mask", 2D) = "black" {}      // Defaults to black, i.e. weight 0
    }

    CGINCLUDE
    #include "UnityCG.cginc"
    uniform sampler2D _MainTex;
    uniform sampler2D _NoiseTex;
    uniform sampler2D _MaskTex;
    uniform float _DistortTimeFactor;
    uniform float _DistortStrength;

    fixed4 frag(v2f_img i) : SV_Target
    {
        // Sample the noise texture with a time-based offset to get a pseudo-random value
        float4 noise = tex2D(_NoiseTex, i.uv - _Time.xy * _DistortTimeFactor);
        // Scale the random value by the strength factor to get the UV offset
        float2 offset = noise.xy * _DistortStrength;
        // Sample the mask texture to get the per-pixel weight
        fixed4 factor = tex2D(_MaskTex, i.uv);
        // Offset the sampling UV, weighted by the mask
        float2 uv = offset * factor.r + i.uv;
        return tex2D(_MainTex, uv);
    }
    ENDCG

    SubShader
    {
        Pass
        {
            ZTest Always
            Cull Off
            ZWrite Off
            Fog { Mode off }

            CGPROGRAM
            #pragma vertex vert_img
            #pragma fragment frag
            #pragma fragmentoption ARB_precision_hint_fastest
            ENDCG
        }
    }
    Fallback off
}
/********************************************************************
 FileName: DistortEffect.cs
 Description: Screen distortion effect
 Created: 2017/04/27
 by: puppet_master
*********************************************************************/
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class DistortEffect : MonoBehaviour
{
    public Shader shader = null;
    private Material _material = null;
    public Material _Material
    {
        get
        {
            if (_material == null)
                _material = GenerateMaterial(shader);
            return _material;
        }
    }

    // Create the material used for the screen effect from a shader
    protected Material GenerateMaterial(Shader shader)
    {
        if (shader == null)
            return null;
        // Make sure the shader is supported on this platform
        if (shader.isSupported == false)
            return null;
        Material material = new Material(shader);
        material.hideFlags = HideFlags.DontSave;
        if (material)
            return material;
        return null;
    }

    // Time factor of the distortion
    [Range(0.0f, 1.0f)]
    public float DistortTimeFactor = 0.5f;
    // Strength of the distortion
    [Range(0.0f, 0.2f)]
    public float DistortStrength = 0.01f;
    // Noise texture 1, mapped to the R channel of the mask
    public Texture NoiseTexture1 = null;
    // Noise texture 2, mapped to the G channel of the mask
    public Texture NoiseTexture2 = null;
    // Noise texture 3, mapped to the B channel of the mask
    public Texture NoiseTexture3 = null;
    // Downsampling factor
    public int downSample = 1;

    private Camera mainCam = null;
    private Camera additionalCam = null;
    private RenderTexture renderTexture = null;

    public void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (additionalCam)
        {
            additionalCam.targetTexture = renderTexture;
            additionalCam.Render();
            if (_Material)
            {
                _Material.SetTexture("_NoiseTex1", NoiseTexture1);
                _Material.SetTexture("_NoiseTex2", NoiseTexture2);
                _Material.SetTexture("_NoiseTex3", NoiseTexture3);
                _Material.SetFloat("_DistortTimeFactor", DistortTimeFactor);
                _Material.SetFloat("_DistortStrength", DistortStrength);
                _Material.SetTexture("_MaskTex", renderTexture);
                Graphics.Blit(source, destination, _Material);
            }
            else
            {
                Graphics.Blit(source, destination);
            }
        }
    }

    void Awake()
    {
        // Create an additional camera that matches the current camera
        InitAdditionalCam();
    }

    private void InitAdditionalCam()
    {
        mainCam = GetComponent<Camera>();
        if (mainCam == null)
            return;
        mainCam.depthTextureMode |= DepthTextureMode.Depth;

        Transform addCamTransform = transform.Find("additionalDistortCam");
        if (addCamTransform != null)
            DestroyImmediate(addCamTransform.gameObject);

        GameObject additionalCamObj = new GameObject("additionalDistortCam");
        additionalCam = additionalCamObj.AddComponent<Camera>();
        additionalCam.transform.parent = mainCam.transform;
        additionalCam.transform.localPosition = Vector3.zero;
        additionalCam.transform.localRotation = Quaternion.identity;
        additionalCam.transform.localScale = Vector3.one;
        additionalCam.backgroundColor = Color.clear;
        additionalCam.clearFlags = CameraClearFlags.Color;
        additionalCam.cullingMask = 1 << LayerMask.NameToLayer("DistortEffect");
        additionalCam.depth = -999;
        additionalCam.allowHDR = false;
        additionalCam.allowMSAA = false;
        SetAdditionalCam();
    }

    private void SetAdditionalCam()
    {
        additionalCam.farClipPlane = mainCam.farClipPlane;
        additionalCam.nearClipPlane = mainCam.nearClipPlane;
        additionalCam.fieldOfView = mainCam.fieldOfView;
        // The mask can be rendered at a lower resolution
        if (renderTexture == null)
            renderTexture = RenderTexture.GetTemporary(Screen.width >> downSample, Screen.height >> downSample, 0);
    }

    void OnEnable()
    {
        if (additionalCam)
        {
            additionalCam.gameObject.SetActive(true);
            additionalCam.enabled = true;
            SetAdditionalCam();
        }
    }

    void OnDisable()
    {
        if (additionalCam)
        {
            additionalCam.gameObject.SetActive(false);
            additionalCam.enabled = false;
        }
    }

    void OnDestroy()
    {
        if (renderTexture)
        {
            RenderTexture.ReleaseTemporary(renderTexture);
        }
        DestroyImmediate(additionalCam.gameObject);
    }
}
//Full-screen distortion shader
//by:puppet_master
//2017.5.3
Shader "Custom/DistortPostEffect"
{
    Properties
    {
        _MainTex("Base (RGB)", 2D) = "white" {}
        _NoiseTex1("Noise1", 2D) = "black" {}  // Defaults to black, i.e. no offset
        _NoiseTex2("Noise2", 2D) = "black" {}  // Defaults to black, i.e. no offset
        _NoiseTex3("Noise3", 2D) = "black" {}  // Defaults to black, i.e. no offset
        _MaskTex("Mask", 2D) = "black" {}      // Defaults to black, i.e. weight 0
    }

    CGINCLUDE
    #include "UnityCG.cginc"
    uniform sampler2D _MainTex;
    uniform sampler2D _NoiseTex1;
    uniform sampler2D _NoiseTex2;
    uniform sampler2D _NoiseTex3;
    uniform sampler2D _MaskTex;
    uniform float _DistortTimeFactor;
    uniform float _DistortStrength;

    fixed4 frag(v2f_img i) : SV_Target
    {
        // Sample the noise textures with a time-based offset to get pseudo-random values
        float4 noise1 = tex2D(_NoiseTex1, i.uv - _Time.xy * _DistortTimeFactor);
        float4 noise2 = tex2D(_NoiseTex2, i.uv - _Time.xy * _DistortTimeFactor);
        float4 noise3 = tex2D(_NoiseTex3, i.uv - _Time.xy * _DistortTimeFactor);
        // Scale the random values by the strength factor to get the UV offsets
        float2 offset1 = noise1.xy * _DistortStrength;
        float2 offset2 = noise2.xy * _DistortStrength;
        float2 offset3 = noise3.xy * _DistortStrength;
        // Sample the mask texture to get the per-pixel weights
        fixed4 factor = tex2D(_MaskTex, i.uv);
        // Offset the sampling UV, one noise texture per mask channel
        float2 uv = offset1 * factor.r + offset2 * factor.g + offset3 * factor.b + i.uv;
        return tex2D(_MainTex, uv);
    }
    ENDCG

    SubShader
    {
        Pass
        {
            ZTest Always
            Cull Off
            ZWrite Off
            Fog { Mode off }

            CGPROGRAM
            #pragma vertex vert_img
            #pragma fragment frag
            #pragma fragmentoption ARB_precision_hint_fastest
            ENDCG
        }
    }
    Fallback off
}
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
Shader "ApcShader/MaskObjPrepass"
{
    Properties
    {
        _RGBColor("RGB Color", Color) = (1, 0, 0, 1)
    }

    SubShader
    {
        Pass
        {
            Tags { "Queue" = "Transparent" "IgnoreProjector" = "True" "RenderType" = "Transparent" }
            Blend One OneMinusSrcAlpha // note, we use premultiplied alpha, so 1 (1 - srcAlpha)
            Cull Off
            Lighting Off
            ZWrite Off

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct appdata_t
            {
                float4 vertex : POSITION;
                float2 texcoord : TEXCOORD0;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                float2 texcoord : TEXCOORD0;
                float4 projPos : TEXCOORD1;
            };

            fixed4 _RGBColor;
            sampler2D_float _CameraDepthTexture;

            v2f vert(appdata_t v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                float3 wvp = mul(unity_ObjectToWorld, v.vertex);
                o.projPos = ComputeScreenPos(o.vertex);
                COMPUTE_EYEDEPTH(o.projPos.z);
                o.texcoord = v.texcoord;
                return o;
            }

            fixed4 frag(v2f i) : SV_Target
            {
                fixed4 col = _RGBColor;
                // Do Z clip: compare against the scene depth buffer
                float zbuf = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));
                float partZ = i.projPos.z;
                float zalpha = saturate((zbuf - partZ + 1e-2f) * 10000);
                col.a = col.a * zalpha;
                // Premultiply alpha
                col.rgb = col.rgb * col.a;
                return col;
            }
            ENDCG
        }
    }
}
In Unity, getting a depth texture is very simple: we just tell Unity "give me the depth texture!" and then access a specific texture property directly in the shader. This communication with Unity happens by setting the camera's depthTextureMode in a script. For example, we can obtain the depth texture with the following code:
camera.depthTextureMode = DepthTextureMode.Depth;
Once the camera mode above is set, we can access the depth texture in a shader simply by declaring the _CameraDepthTexture variable. The process is very simple, but it is worth knowing that behind these two lines of code Unity does a lot of work for us. Similarly, if we want the depth + normals texture, we only need the following setting in the script:
camera.depthTextureMode = DepthTextureMode.DepthNormals;
Then, in the shader, we access it by declaring the _CameraDepthNormalsTexture variable. We can also combine these modes so that a single camera generates both a depth texture and a depth + normals texture:
camera.depthTextureMode |= DepthTextureMode.Depth;
camera.depthTextureMode |= DepthTextureMode.DepthNormals;
In most cases we sample the depth texture with the SAMPLE_DEPTH_TEXTURE macro, which hides platform differences for us, for example:

float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv);
Here, i.uv is a float2 variable holding the texture coordinates of the current pixel. Similar macros include SAMPLE_DEPTH_TEXTURE_PROJ and SAMPLE_DEPTH_TEXTURE_LOD. SAMPLE_DEPTH_TEXTURE_PROJ also takes two parameters: the depth texture and a float3 or float4 texture coordinate. Internally it uses a projective sampling function such as tex2Dproj, i.e. the first two components of the texture coordinate are divided by the last component before sampling. If a fourth component is supplied, an extra comparison is performed as well, which is typically used in shadow implementations. The second parameter of SAMPLE_DEPTH_TEXTURE_PROJ is usually the screen position interpolated from the vertex shader output, for example: float d = SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.srcPos));
Here, i.srcPos is the screen position obtained in the vertex shader by calling ComputeScreenPos(o.pos). All of the macros above can be found in Unity's built-in HLSLSupport.cginc file.
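Putting these pieces together, a minimal sketch of a pass that samples the depth this way could look as follows (the names v2f_depth and projPos are only illustrative; UnityCG.cginc supplies appdata_base, ComputeScreenPos and the sampling macros):

#include "UnityCG.cginc"

sampler2D _CameraDepthTexture;

struct v2f_depth
{
    float4 pos     : SV_POSITION;
    float4 projPos : TEXCOORD0;
};

v2f_depth vert(appdata_base v)
{
    v2f_depth o;
    o.pos = UnityObjectToClipPos(v.vertex);
    // Screen-space position used for projective sampling in the fragment shader
    o.projPos = ComputeScreenPos(o.pos);
    return o;
}

fixed4 frag(v2f_depth i) : SV_Target
{
    // Non-linear depth as stored in the depth texture
    float d = SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos));
    return fixed4(d, d, d, 1.0);
}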
The depth values obtained by sampling the depth texture are usually non-linear; this non-linearity comes from the clip matrix used in perspective projection. However, our computations usually require linear depth values, which means we need to transform the projected depth back into a linear space, such as view-space depth. How do we perform this conversion? In fact, we only need to invert the vertex transformation process. Taking perspective projection as an example, we now derive how to compute the view-space depth from the depth stored in the depth texture.

We already know that when we transform a view-space vertex with the perspective clip matrix $P_{clip}$, the z and w components of the vertex in clip space are:

$$z_{clip} = -z_{visw}\frac{Far + Near}{Far - Near} - \frac{2 \cdot Near \cdot Far}{Far - Near}, \qquad w_{clip} = -z_{visw}$$
Here, Far and Near are the distances to the far and near clip planes. The homogeneous division then gives the z component in NDC:

$$z_{ndc} = \frac{z_{clip}}{w_{clip}} = \frac{Far + Near}{Far - Near} + \frac{2 \cdot Near \cdot Far}{(Far - Near) \cdot z_{visw}}$$
We also know that the depth value stored in the depth texture is computed from the NDC z by the following formula:

$$d = 0.5 \cdot z_{ndc} + 0.5$$
From the equations above we can derive an expression for $z_{visw}$ in terms of d:

$$z_{visw} = \frac{1}{\dfrac{Far - Near}{Near \cdot Far} \cdot d - \dfrac{1}{Near}}$$
Since in Unity's view space the camera looks down the negative z axis, the z values of everything in front of the camera are negative. To obtain a positive depth value we therefore negate the result above, which gives:

$$z_{visw}' = \frac{1}{\dfrac{Near - Far}{Near \cdot Far} \cdot d + \dfrac{1}{Near}}$$
Its range is exactly the depth range of the view frustum, i.e. [Near, Far]. If we want a depth value in the [0, 1] range, we only need to divide the result above by Far; 0 then means the point coincides with the camera position and 1 means the point lies on the far clip plane of the frustum. The result is:

$$z_{01} = \frac{1}{\dfrac{Near - Far}{Near} \cdot d + \dfrac{Far}{Near}}$$
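As a sanity check, the two expressions can be written out directly in shader code. The rough sketch below uses helper names (DepthToLinearEye, DepthToLinear01) that are made up for illustration; _ProjectionParams.y and _ProjectionParams.z are Unity's built-in near and far plane distances. It follows the OpenGL-style depth convention assumed by the derivation above; in practice the built-in helpers introduced next handle platform differences for us.

// Manual linearization of a raw depth-texture value d (sketch only).
// _ProjectionParams.y = Near, _ProjectionParams.z = Far (Unity built-ins).
float DepthToLinearEye(float d)
{
    float near = _ProjectionParams.y;
    float far  = _ProjectionParams.z;
    // z_visw' = 1 / ((Near - Far) / (Near * Far) * d + 1 / Near)
    return 1.0 / ((near - far) / (near * far) * d + 1.0 / near);
}

float DepthToLinear01(float d)
{
    float near = _ProjectionParams.y;
    float far  = _ProjectionParams.z;
    // z_01 = 1 / ((Near - Far) / Near * d + Far / Near)
    return 1.0 / ((near - far) / near * d + far / near);
}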
Fortunately, Unity provides two helper functions that perform the calculation above for us: LinearEyeDepth and Linear01Depth. LinearEyeDepth converts the sampled depth-texture value into a view-space depth, i.e. the $z_{visw}'$ we derived above, while Linear01Depth returns a linear depth value in the [0, 1] range, i.e. the $z_{01}$ above. Internally, both functions use the built-in _ZBufferParams variable to obtain the near and far clip plane distances.
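In a full-screen post-processing pass, using these helpers typically looks like the minimal sketch below (it assumes the vert_img vertex function from UnityCG.cginc, so i.uv is the screen UV; the output simply visualizes the linear 0-1 depth):

#include "UnityCG.cginc"

sampler2D _CameraDepthTexture;

fixed4 frag(v2f_img i) : SV_Target
{
    // Raw, non-linear depth from the depth texture
    float rawDepth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv);
    // View-space depth in [Near, Far] (shown only for comparison)
    float eyeDepth = LinearEyeDepth(rawDepth);
    // Linear depth in [0, 1]
    float linear01 = Linear01Depth(rawDepth);
    // Visualize the linear 0-1 depth
    return fixed4(linear01, linear01, linear01, 1.0);
}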
If we need the depth + normals texture, we can sample _CameraDepthNormalsTexture directly with tex2D to obtain the packed depth and normal information stored in it. Unity provides a helper function to decode this sample into a depth value and a normal direction. The function is DecodeDepthNormal, defined in UnityCG.cginc:
inline void DecodeDepthNormal(float4 enc, out float depth, out float3 normal)
{
    depth = DecodeFloatRG(enc.zw);
    normal = DecodeViewNormalStereo(enc);
}
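Decoding then looks roughly like the following sketch, again in a full-screen pass with i.uv as the screen UV; the decoded depth is a linear [0, 1] value and the normal is in view space:

#include "UnityCG.cginc"

sampler2D _CameraDepthNormalsTexture;

fixed4 frag(v2f_img i) : SV_Target
{
    float4 enc = tex2D(_CameraDepthNormalsTexture, i.uv);
    float depth;    // linear 0-1 depth, decoded from enc.zw
    float3 normal;  // view-space normal, decoded from enc.xy
    DecodeDepthNormal(enc, depth, normal);
    // Visualize the view-space normal
    return fixed4(normal * 0.5 + 0.5, 1.0);
}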