Shader vertex position depends on camera?

Hi guys,

I’m new to Cg shader writing and I’ve run into a problem: I’m trying to vary the color of vertices based on their world position.

So I looked at some examples on the web to see how to do that, and I found this one:

Cg Programming/Unity/Shading in World Space - Wikibooks: http://en.wikibooks.org/wiki/Cg_Programming/Unity/Shading_in_World_Space
But it seems that output.position_in_world_space = mul(_Object2World, input.vertex); varies with the camera position, and I can’t find any other examples that don’t use this =/

Thank you in advance for your help!
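
For reference, here is roughly what I understood the tutorial to be doing (my own paraphrase of the idea, not the exact wikibooks code, and the variable names are mine):

   struct vertexOutput {
      float4 pos : SV_POSITION;
      float4 position_in_world_space : TEXCOORD0;
   };

   vertexOutput vert(float4 vertex : POSITION)
   {
      vertexOutput output;
      output.pos = mul(UNITY_MATRIX_MVP, vertex);
      // only the object's Transform is applied here
      output.position_in_world_space = mul(_Object2World, vertex);
      return output;
   }

   float4 frag(vertexOutput input) : COLOR
   {
      // color varies with the world-space height of the fragment
      return lerp(float4(0.0, 0.0, 1.0, 1.0), float4(1.0, 1.0, 1.0, 1.0),
                  saturate(input.position_in_world_space.y));
   }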

Here is the code of my shader:

  Shader "MFMobile/Cloud" {
	Properties {
	      _MainTex ("RGBA Texture Image", 2D) = "white" {} 
	     _CloudBaseColor ("CloudBaseColor", Color) = (0.5,0.5,0.5,1)   
	     _CloudPositon("Cloudposition",Vector)= (0.0,0.0,0.0,0.0)
	   }
   
   SubShader {
      Tags {"Queue" = "Transparent"} 
 
      Pass {	
         Cull Front // first render the back faces
         ZWrite Off // don't write to depth buffer 
            // in order not to occlude other objects
         Blend SrcAlpha OneMinusSrcAlpha 
            // blend based on the fragment's alpha value
 		
         CGPROGRAM
 			
         #pragma vertex vert  
         #pragma fragment frag 
 
         uniform sampler2D _MainTex;    
         uniform float _Cutoff;
         uniform float4 _CloudPositon;
 		
         struct vertexInput {
            float4 vertex : POSITION;
            float4 texcoord : TEXCOORD0;
         };
         struct vertexOutput {
            float4 pos : SV_POSITION;
            float4 tex : TEXCOORD0;
         };
 
         vertexOutput vert(vertexInput input) 
         {
            vertexOutput output;
 
            output.tex = input.texcoord;
            output.pos = mul(UNITY_MATRIX_MVP, input.vertex);
            return output;
         }
 
         float4 frag(vertexOutput input) : COLOR
         {
            return tex2D(_MainTex, input.tex.xy);
         }
 
         ENDCG
      }
 
      Pass {	
         Cull Back // now render the front faces
         ZWrite Off // don't write to depth buffer 
            // in order not to occlude other objects
         Blend SrcAlpha OneMinusSrcAlpha 
            // blend based on the fragment's alpha value
 		
         CGPROGRAM
 		
         #pragma vertex vert  
         #pragma fragment frag 
 
         uniform sampler2D _MainTex;    
         uniform float _Cutoff;
         uniform float4 _LightColor0;
         uniform float4 _CloudBaseColor;
         uniform float4x4 _Camera2World; // built-in camera-to-world matrix (unused in this shader)
         uniform float4 _CloudPositon;
 		 
         struct vertexInput {
            float4 vertex : POSITION;
            float4 texcoord : TEXCOORD0;
         };
         
         struct vertexOutput {
            float4 pos : SV_POSITION;
            float4 tex : TEXCOORD0;
            float4	cloudShadow : COLOR;
            float3 worldPos : TEXCOORD1;
         };
 		 
         vertexOutput vert(vertexInput input) 
         {
            vertexOutput output;
            output.pos = mul(UNITY_MATRIX_MVP, input.vertex);

            // transform the vertex into world space
            float4 world_space_vertex = mul(_Object2World, input.vertex);
            output.worldPos = world_space_vertex.xyz; // pass the world position on to the fragment shader
            if (world_space_vertex.y > 1.0) {
               output.cloudShadow = float4(0.9, 0.9, 0.9, 1.0);
            } else {
               output.cloudShadow = _CloudBaseColor;
            }
		 	
            output.tex = input.texcoord;
            
            return output;
         }
 		 
         float4 frag(vertexOutput input) : COLOR
         {
            return tex2D(_MainTex, input.tex.xy) * _LightColor0 * input.cloudShadow;
         }
 
         ENDCG
      }
   }
   // Fallback "Unlit/Transparent"
  }

_Object2World does what the name suggests, so there is definitely no camera position involved.

_Object2World is essentially what the object’s Transform component represents: it transforms a local-space position into world space. This matrix is also called “M” (model matrix) and is part of the combined MVP matrix, which also contains the View and Projection matrices. The View matrix is the inverse of the camera’s Transform, so it transforms world-space positions into the camera’s local space. The Projection matrix finally does the 3D-to-2D mapping, either in a perspective or an orthographic way.
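
To make that concrete, here is a minimal sketch of the chain described above (assuming a Unity version that exposes UNITY_MATRIX_V and UNITY_MATRIX_P; someLocalVertex is just a placeholder for the POSITION input):

   float4 localPos = someLocalVertex;                // object (local) space, placeholder name
   float4 worldPos = mul(_Object2World, localPos);   // M: only the object's Transform, no camera involved
   float4 viewPos  = mul(UNITY_MATRIX_V, worldPos);  // V: the camera only enters the chain here
   float4 clipPos  = mul(UNITY_MATRIX_P, viewPos);   // P: perspective or orthographic 3D->2D mapping
   // clipPos is equivalent to mul(UNITY_MATRIX_MVP, localPos)

worldPos (and in particular worldPos.y) does not change when the camera moves.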

It might help if you showed your actual shader code so we can see where you did what.