Hello, I have been developing this image effect for quite some time now, and it has gone through many iterations to get to this point (no pun intended). I was wondering, though, if there are any ways to speed it up further? It is pretty taxing on my system (it is only a laptop at the moment), and I feel it could still use some work. I have tried using GameObjects, then object pooling, then the Graphics class, then compute buffers, and now I am using the GL library. This is the image effect:
and these are the two scripts:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class GLRect : MonoBehaviour
{
    // Number of quads drawn per frame.
    public int speed;
    // Quad size in local units.
    public float width = .1f;
    public float height = .1f;
    // Half-extents of the elliptical spawn region (points land in +/- xMult, +/- yMult).
    public float xMult;
    public float yMult;
    // Shared unlit, vertex-colored material for immediate-mode drawing
    // (static: one instance serves every GLRect in the scene).
    static Material lineMaterial;
    public Camera cam;

    private void Start()
    {
        if (!lineMaterial)
        {
            // Unity's built-in unlit, vertex-colored shader.
            Shader shader = Shader.Find("Hidden/Internal-Colored");
            lineMaterial = new Material(shader);
            // Keep the material out of the scene hierarchy and out of saved scenes.
            lineMaterial.hideFlags = HideFlags.HideAndDontSave;
            // Turn off depth writes so quads do not z-fight or occlude each other.
            lineMaterial.SetInt("_ZWrite", 0);
        }
    }

    void OnRenderObject()
    {
        // NOTE(review): OnRenderObject fires once per camera that renders this
        // object — including the capture camera in textureHandler. Confirm the
        // resulting feedback loop is intentional; otherwise compare
        // Camera.current against `cam` and return early.
        lineMaterial.SetPass(0);
        // BUG FIX: the original called GL.MultMatrix without Push/Pop and never
        // called GL.End(), leaving the immediate-mode batch open and the matrix
        // stack corrupted for everything rendered afterwards.
        GL.PushMatrix();
        GL.MultMatrix(transform.localToWorldMatrix); // draw in this object's local space
        GL.Begin(GL.QUADS);
        for (int i = 0; i < speed; i++)
        {
            Vector2 v = getRandCoords();
            // Map from local coords [-xMult, xMult] into [0, 2*xMult] for the texture lookup.
            Vector2 colorGenCoord = new Vector2((v.x / xMult + 1) * xMult, (v.y / yMult + 1) * yMult);
            Color32 c = textureHandler.getPixelColor(colorGenCoord.x, colorGenCoord.y, xMult * 2, yMult * 2);
            GL.Color(c);
            // One axis-aligned quad with its lower-left corner at v.
            GL.Vertex3(v.x, v.y, 1);
            GL.Vertex3(v.x, v.y + height, 1);
            GL.Vertex3(v.x + width, v.y + height, 1);
            GL.Vertex3(v.x + width, v.y, 1);
        }
        GL.End();
        GL.PopMatrix();
    }

    // Picks a uniform random angle and radius inside an ellipse of half-extents
    // (xMult, yMult). Uniform radius means points cluster toward the center.
    private Vector2 getRandCoords()
    {
        Vector2 v;
        float theta = Random.Range(0, 2 * Mathf.PI);
        float a = Random.Range(0, 1f);
        v.x = a * Mathf.Cos(theta) * xMult;
        v.y = a * Mathf.Sin(theta) * yMult;
        return v;
    }
}
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class textureHandler : MonoBehaviour
{
    // Capture resolution: pixels per side of the square render texture.
    public static int resolution = 256;
    public RenderTexture rt;
    private static Texture2D tex;
    // Raw RGBA32 pixel data of the most recent capture (4 bytes per pixel).
    private static byte[] colorArray = new byte[resolution * resolution * 4];
    // Bytes per pixel in colorArray (RGBA32 stride). Name kept for compatibility;
    // "abberation" appears to be a misspelling of "aberration", but it is used as a stride.
    public static int abberation = 4;

    // Routes this camera's output into an offscreen RenderTexture so each frame
    // can be read back into colorArray for CPU-side sampling.
    void Start()
    {
        rt = new RenderTexture(resolution, resolution, 0);
        GetComponent<Camera>().targetTexture = rt;
        // Explicit RGBA32 with no mip chain: matches the 4-byte stride assumed by
        // getPixelColor and avoids mipmap regeneration on every ReadPixels.
        tex = new Texture2D(rt.width, rt.height, TextureFormat.RGBA32, false);
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // BUG FIX: ReadPixels copies from RenderTexture.active; the original
        // relied on a stale value assigned once in Start. Point it at the
        // freshly rendered source, then restore it.
        RenderTexture previous = RenderTexture.active;
        RenderTexture.active = source;
        tex.ReadPixels(new Rect(0, 0, tex.width, tex.height), 0, 0);
        RenderTexture.active = previous;
        colorArray = tex.GetRawTextureData();
        // BUG FIX: OnRenderImage must write to destination, otherwise the
        // camera's final output is undefined (Unity logs a warning).
        Graphics.Blit(source, destination);
    }

    // Samples the captured frame at normalized-ish coordinates:
    // (x / camWidth, y / camHeight) maps to a texel; alpha is forced to 255.
    public static Color32 getPixelColor(float x, float y, float camWidth, float camHeight)
    {
        // BUG FIX: the original cast indices to byte, so x == 0 produced -1,
        // which wrapped to 255 — sampling the opposite edge of the texture.
        // Clamp to the valid texel range instead.
        int iX = Mathf.Clamp(Mathf.RoundToInt((x / camWidth) * tex.width - 1), 0, tex.width - 1);
        int iY = Mathf.Clamp(Mathf.RoundToInt((y / camHeight) * tex.height - 1), 0, tex.height - 1);
        int index = (iX + iY * resolution) * abberation;
        return new Color32(colorArray[index], colorArray[index + 1], colorArray[index + 2], 255);
    }
}
As you can see, the first script is the main one and is attached to the camera you actually see; the second script is attached to a camera that captures the scene and renders it into an array. I don’t really know how else to optimize this, and I’m sure you guys know something. Thanks!