Converting z-buffer depth values to distances from the camera

Hi! Does anyone know how to convert values from the depth texture into distances from the camera?
I wrote a shader for it, but I can't get the conversion right.

Shader "Custom/MyDepthShader"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
      
    }
    SubShader
    {
        // No culling or depth
        Cull Off ZWrite Off ZTest Always

        Pass
        {
            CGPROGRAM
            #pragma vertex vert // compile function vert as vertex shader
            #pragma fragment frag // compile function frag as fragment shader

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;
                return o;
            }

            sampler2D _MainTex;
            sampler2D _CameraDepthTexture;

            fixed4 frag (v2f i) : SV_Target
            {
                // Raw non-linear depth from the hardware z-buffer
                float depth = tex2D(_CameraDepthTexture, i.uv).r;
                // Convert to linear 0-1 depth (a fraction of the far clip distance)
                depth = Linear01Depth(depth);
                // Multiply by the far clip distance to get eye depth in world units
                depth = depth * _ProjectionParams.z;
                return depth;
            }
            ENDCG
        }
    }
}

As a result, I want the output texture to contain distance values.

If you just need view-space depth (distance along the camera's local z axis), then you can use the LinearEyeDepth function:

fixed4 frag (v2f i) : SV_Target
{
    return LinearEyeDepth (tex2D (_CameraDepthTexture, i.uv).r);
}
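
Note that _CameraDepthTexture only gets filled in if the camera is actually generating a depth texture (enable it from a script with camera.depthTextureMode |= DepthTextureMode.Depth, or use the deferred rendering path). Also bear in mind that eye depth is in world units, so returning it directly will clip everything beyond one unit to white on screen. As a quick sketch for previewing it, you can divide by the far clip distance (_ProjectionParams.z) to get a visible 0-1 gradient:

fixed4 frag (v2f i) : SV_Target
{
    float eyeDepth = LinearEyeDepth (tex2D (_CameraDepthTexture, i.uv).r);
    // _ProjectionParams.z holds the far clip distance, so this remaps
    // eye depth into a displayable 0-1 range
    return eyeDepth / _ProjectionParams.z;
}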

If you need the actual real-world distance from a point in the scene to the camera, then you'll need to reconstruct its position using the camera matrices:

struct v2f
{
    float4 pos : SV_POSITION;
    float2 uv : TEXCOORD0;
    float4 viewDir : TEXCOORD1;
};

v2f vert (appdata v)
{
    v2f o;
    o.pos = UnityObjectToClipPos (v.vertex);
    o.uv = v.uv;
    //Unproject the pixel's uv to a view-space position on the far clip plane
    //(when interpolated, this gives the per-pixel view direction of the scene position)
    o.viewDir = mul (unity_CameraInvProjection, float4 (o.uv * 2.0 - 1.0, 1.0, 1.0));
    return o;
}

half4 frag (v2f i) : SV_Target
{
    float depth01 = Linear01Depth (tex2D (_CameraDepthTexture, i.uv).r);
    //Perspective-divide viewDir, then scale by the linear 0-1 depth to find the view-space position of this pixel
    float3 viewPos = (i.viewDir.xyz / i.viewDir.w) * depth01;
    //Length of viewPos is the raw distance to the camera
    return length (viewPos);
}
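
One caveat: if you write that result into an ordinary 8-bit render texture, raw distances in world units will be clamped to 1. A minimal sketch of a workaround, assuming you can store the distance as a fraction of the far clip distance and multiply by _ProjectionParams.z when reading it back:

half4 frag (v2f i) : SV_Target
{
    float depth01 = Linear01Depth (tex2D (_CameraDepthTexture, i.uv).r);
    float3 viewPos = (i.viewDir.xyz / i.viewDir.w) * depth01;
    // Normalize by the far clip distance so the value survives an
    // 8-bit target; decode later by multiplying by _ProjectionParams.z
    return length (viewPos) / _ProjectionParams.z;
}

(In the frustum corners the true distance can still slightly exceed the far clip distance, so it may clip there; a floating-point render texture avoids the problem entirely.)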

Thank you for your help. I can now get these values along the z-axis, but they're biased by 0.5 units for some reason. Do you know why?

I found the answer. It was so obvious that I couldn't see it earlier: the bias came from the cube's size. The depth texture stores the distance to the cube's visible face, not to its center, so a default 1-unit cube shifts the value by half its size, i.e. 0.5 units.

THANK YOU THANK YOU THANK YOU THANK YOU THANK YOU !!!