// Poll the trigger button on an XR controller (`device` is an
// UnityEngine.XR.InputDevice obtained elsewhere, e.g. via InputDevices).
bool triggerValue;
// TryGetFeatureValue returns true when the device supports the usage and a
// value was read; triggerValue then holds the current pressed state.
if (device.TryGetFeatureValue(UnityEngine.XR.CommonUsages.triggerButton,
out triggerValue)
&& triggerValue)
{
Debug.Log("Trigger button is pressed");
}
In case it helps anyone, the following script will allow you to call any method when a button is pressed, and when it’s released. This only works for the Button entries here (Unity - Manual: Unity XR Input) but it can easily be adapted to work with the Axis as well. Just attach this to any game object.
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Events;
using UnityEngine.XR;
namespace Zenva.VR
{
    /// <summary>
    /// Fires UnityEvents when the selected XR controller button starts being
    /// pressed or is released. Attach to any GameObject and configure the
    /// device role and button in the inspector.
    /// </summary>
    public class ButtonController : MonoBehaviour
    {
        // Lookup from the inspector enum's name to the XR bool feature usage.
        static readonly Dictionary<string, InputFeatureUsage<bool>> availableButtons = new Dictionary<string, InputFeatureUsage<bool>>
        {
            {"triggerButton", CommonUsages.triggerButton },
            {"thumbrest", CommonUsages.thumbrest },
            {"primary2DAxisClick", CommonUsages.primary2DAxisClick },
            {"primary2DAxisTouch", CommonUsages.primary2DAxisTouch },
            {"menuButton", CommonUsages.menuButton },
            {"gripButton", CommonUsages.gripButton },
            {"secondaryButton", CommonUsages.secondaryButton },
            {"secondaryTouch", CommonUsages.secondaryTouch },
            {"primaryButton", CommonUsages.primaryButton },
            {"primaryTouch", CommonUsages.primaryTouch },
        };

        // Inspector-friendly mirror of the keys in availableButtons.
        public enum ButtonOption
        {
            triggerButton,
            thumbrest,
            primary2DAxisClick,
            primary2DAxisTouch,
            menuButton,
            gripButton,
            secondaryButton,
            secondaryTouch,
            primaryButton,
            primaryTouch
        };

        [Tooltip("Input device role (left or right controller)")]
        public InputDeviceRole deviceRole;

        [Tooltip("Select the button")]
        public ButtonOption button;

        [Tooltip("Event when the button starts being pressed")]
        public UnityEvent OnPress;

        [Tooltip("Event when the button is released")]
        public UnityEvent OnRelease;

        // Whether the button is currently considered pressed on ANY matching device.
        public bool IsPressed { get; private set; }

        // Scratch list reused every frame to avoid per-frame allocations.
        List<InputDevice> inputDevices;
        bool inputValue;
        InputFeatureUsage<bool> inputFeature;

        void Awake()
        {
            // Resolve the enum selection to its InputFeatureUsage once.
            string featureLabel = Enum.GetName(typeof(ButtonOption), button);
            availableButtons.TryGetValue(featureLabel, out inputFeature);
            inputDevices = new List<InputDevice>();
        }

        void Update()
        {
            InputDevices.GetDevicesWithRole(deviceRole, inputDevices);

            // BUGFIX: aggregate across all matching devices BEFORE changing
            // state. The original mutated IsPressed inside the device loop, so
            // a second, unpressed device could fire OnRelease in the very same
            // frame a pressed device fired OnPress. It also never fired
            // OnRelease when the device list became empty (controller off).
            bool anyPressed = false;
            for (int i = 0; i < inputDevices.Count; i++)
            {
                if (inputDevices[i].TryGetFeatureValue(inputFeature, out inputValue) && inputValue)
                {
                    anyPressed = true;
                    break;
                }
            }

            if (anyPressed && !IsPressed)
            {
                // Transition: released -> pressed.
                IsPressed = true;
                OnPress.Invoke();
            }
            else if (!anyPressed && IsPressed)
            {
                // Transition: pressed -> released (also on device loss).
                IsPressed = false;
                OnRelease.Invoke();
            }
        }
    }
}
Hello! Here is my example implementation for buttons input for 6Dof VR Headsets using controllers like the Vive or Rift or WMR. This is from a racing game I was working on. Any feedback/questions would be appreciated.
VRPlayer/Camera Parent Code (Attach only this script to your Camera Parent):
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Events;
using UnityEngine.Serialization;
using UnityEngine.XR;
[System.Serializable] // Serializable UnityEvent<bool> that remembers the last reported value
public class AButtonEvent : UnityEvent<bool>
{
    // Most recently reported button state; compared by callers to detect edges.
    public bool Value { get; set; }

    /// <summary>Hooks up a listener and seeds the stored state in one call.</summary>
    public void Initialize(bool value, UnityAction<bool> method)
    {
        AddListener(method);
        Value = value;
    }
}
/// <summary>
/// VR camera-parent driver: tracks the hand anchors to the XR controllers and
/// polls controller buttons each frame, forwarding edge changes to the
/// HandController events.
/// </summary>
public class RacerVR : Racer
{
    public GameObject LeftAnchor;
    public GameObject RightAnchor;

    private HandController _leftController;
    private HandController _rightController;
    private UnityEngine.XR.InputDevice _leftDevice;
    private UnityEngine.XR.InputDevice _rightDevice;

    // Start is called before the first frame update.
    // `new` makes the hiding of Racer.Start explicit, consistent with Update below.
    private new void Start()
    {
        base.Start();
        SetDevices();
        // Initialize Hands
        _leftController = LeftAnchor.AddComponent<HandController>();
        _rightController = RightAnchor.AddComponent<HandController>();
    }

    // Update is called once per frame
    private new void Update()
    {
        base.Update();

        // Set Tracked Devices
        SetDevicePosAndRot(XRNode.LeftHand, LeftAnchor);
        SetDevicePosAndRot(XRNode.RightHand, RightAnchor);

        // Set Buttons
        UpdateButtonState(_leftDevice, CommonUsages.gripButton, _leftController.GripEvent);
        UpdateButtonState(_rightDevice, CommonUsages.gripButton, _rightController.GripEvent);
        UpdateButtonState(_leftDevice, CommonUsages.primary2DAxisClick, _leftController.ClickEvent);
        UpdateButtonState(_rightDevice, CommonUsages.primary2DAxisClick, _rightController.ClickEvent);
        UpdateButtonState(_leftDevice, CommonUsages.triggerButton, _leftController.TriggerEvent);
        UpdateButtonState(_rightDevice, CommonUsages.triggerButton, _rightController.TriggerEvent);
        UpdateButtonState(_leftDevice, CommonUsages.menuButton, _leftController.MenuEvent);
        UpdateButtonState(_rightDevice, CommonUsages.menuButton, _rightController.MenuEvent);
    }

    // Positions the anchor at the tracked node's local pose.
    // NOTE(review): InputTracking.GetLocalPosition is being deprecated in favor
    // of InputDevice.TryGetFeatureValue(CommonUsages.devicePosition, ...).
    private static void SetDevicePosAndRot(XRNode trackedDevice, GameObject anchor)
    {
        anchor.transform.localPosition = UnityEngine.XR.InputTracking.GetLocalPosition(trackedDevice);
        anchor.transform.localRotation = UnityEngine.XR.InputTracking.GetLocalRotation(trackedDevice);
    }

    // Returns the first device found at the node; a default (invalid)
    // InputDevice when none are connected.
    private static InputDevice GetCurrentDevice(XRNode node)
    {
        var device = new InputDevice();
        var devices = new List<UnityEngine.XR.InputDevice>();
        UnityEngine.XR.InputDevices.GetDevicesAtXRNode(node, devices);
        if (devices.Count == 1)
        {
            device = devices[0];
            //Debug.Log($"Device name '{device.name}' with role '{device.role.ToString()}'");
        }
        else if (devices.Count > 1)
        {
            // BUGFIX: assign before logging. The original logged the role of
            // the default-constructed InputDevice, not the device it found.
            device = devices[0];
            Debug.Log($"Found more than one '{device.role.ToString()}'!");
        }
        return device;
    }

    // Polls one button usage on a device and fires the event only on state changes.
    private void UpdateButtonState(InputDevice device, InputFeatureUsage<bool> button,
        AButtonEvent aButtonPressEvent)
    {
        bool tempState;
        bool invalidDeviceFound = false;
        bool buttonState = false;

        tempState = device.isValid // the device is still valid
            && device.TryGetFeatureValue(button, out buttonState) // did get a value
            && buttonState; // the value we got

        if (!device.isValid)
            invalidDeviceFound = true;

        if (tempState != aButtonPressEvent.Value) // Button state changed since last frame
        {
            aButtonPressEvent.Invoke(tempState);
            aButtonPressEvent.Value = tempState;
        }

        if (invalidDeviceFound) // refresh device lists
            SetDevices();
    }

    // Caches the left/right hand controller devices.
    private void SetDevices()
    {
        _leftDevice = GetCurrentDevice(XRNode.LeftHand);
        _rightDevice = GetCurrentDevice(XRNode.RightHand);
    }

    // Debug helper: logs every currently connected XR device.
    private void ShowCurrentlyAvailableXRDevices()
    {
        var inputDevices = new List<UnityEngine.XR.InputDevice>();
        UnityEngine.XR.InputDevices.GetDevices(inputDevices);
        foreach (var device in inputDevices)
        {
            Debug.Log($"Device found with name '{device.name}' and role '{device.role.ToString()}'");
        }
    }
}
Hand/Controller Code:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Events;
using UnityEngine.Serialization;
/// <summary>
/// Per-hand controller state: mirrors button states into public flags,
/// drives hand animations, and logs press/release transitions.
/// The AButtonEvent properties are invoked by the owning RacerVR.
/// </summary>
[RequireComponent(typeof(Animator))]
public class HandController : MonoBehaviour
{
    // Cached animator parameter ids; avoids re-hashing the string on every SetBool.
    private static readonly int GripAnimId = Animator.StringToHash("GripAnimation");
    private static readonly int TriggerAnimId = Animator.StringToHash("TriggerAnimation");
    private static readonly int ClickAnimId = Animator.StringToHash("ClickAnimation");

    public bool IsGripPressed;
    public bool IsTriggerPressed;
    public bool IsMenuPressed;
    public bool IsClickPressed;

    // Button Events
    public AButtonEvent GripEvent { get; set; }
    public AButtonEvent TriggerEvent { get; set; }
    public AButtonEvent MenuEvent { get; set; }
    public AButtonEvent ClickEvent { get; set; }

    private Animator _animator;

    void Start()
    {
        InitializeButtons();
        _animator = GetComponent<Animator>();
    }

    // NOTE(review): the empty per-frame Update() was removed. Unity still
    // invokes empty magic methods, costing a native->managed call every frame.

    // Creates the events and seeds them with the current pressed flags.
    private void InitializeButtons()
    {
        (GripEvent = new AButtonEvent()).Initialize(IsGripPressed, OnGripButtonEvent);
        (TriggerEvent = new AButtonEvent()).Initialize(IsTriggerPressed, OnTriggerButtonEvent);
        (MenuEvent = new AButtonEvent()).Initialize(IsMenuPressed, OnMenuButtonEvent);
        (ClickEvent = new AButtonEvent()).Initialize(IsClickPressed, OnClickButtonEvent);
    }

    // Button Functions
    private void OnGripButtonEvent(bool pressed)
    {
        IsGripPressed = pressed;
        _animator.SetBool(GripAnimId, pressed);
        if (pressed)
        {
            Debug.Log("Grip Pressed");
        }
        else
        {
            Debug.Log("Grip Released");
        }
    }

    private void OnTriggerButtonEvent(bool pressed)
    {
        IsTriggerPressed = pressed;
        _animator.SetBool(TriggerAnimId, pressed);
        if (pressed)
        {
            Debug.Log("Trigger Pressed");
        }
        else
        {
            Debug.Log("Trigger Released");
        }
    }

    // Menu drives no animation and only logs on press (matches original behavior).
    private void OnMenuButtonEvent(bool pressed)
    {
        IsMenuPressed = pressed;
        if (pressed)
        {
            Debug.Log("Menu Pressed");
        }
    }

    private void OnClickButtonEvent(bool pressed)
    {
        IsClickPressed = pressed;
        _animator.SetBool(ClickAnimId, pressed);
        if (pressed)
        {
            Debug.Log("Click Pressed");
        }
        else
        {
            Debug.Log("Click Released");
        }
    }
}
Glad to see some people experimenting with it, and most of these examples look to get the job done.
Pinging in here to watch this thread for additional questions or feedback. It’s seeing a few minor additions in 2019.2 (device connection and disconnection callbacks), and is a start to help us break away from the limitations and manual steps of using the Input Manager.
I have a question regarding the new XR input system and the Unity UI system. If I have a world space canvas UI and I want to use input from a VR controller to control it (pushing buttons etc.) - is there built-in functionality already to make this happen? I found an older thread by Oculus (https://developer.oculus.com/blog/unitys-ui-system-in-vr/) which talks about subclassing several event system-related classes to make this happen. Is this still required or is there a built-in solution already?
Those are actually returning the same data. We’d like to wean people off of using XRNodes, in fact GetLocalPosition has now been tagged as obsolete. The remainder of InputTracking will be made obsolete over time as we can migrate people over to InputDevice(s) APIs. So my personal suggestion is to always try to use InputDevice APIs, those will not be going away.
This is part of a larger, upcoming thing, and is on its way very shortly!
Sorry it’s not yet available.
@StayTalm_Unity Thanks for your answer. Just to be sure, we are indeed talking about a “touch+click to use” functionality (not a “laser pointer thing”), right? Is this planned for this year or would you say its more of a 2020 thing?
And, if I wanted to implement this beforehand, is the general idea in the Oculus thread (i.e.subclassing InputModule and the Raycaster classes) still the preferred way to go?
It’s very simple and clean, and I’m not sure what an equivalent solution is using an InputDevice instead of an XRNode? Any help would be much appreciated.
Hey @addent
It’s a little less simple, but this would be a close equivalent:
/// <summary>
/// Minimal tracker: every frame, snaps this transform to the pose of the
/// first XR device matching the configured role.
/// </summary>
public class BasicXRTracker : MonoBehaviour
{
    // Shared scratch list so the per-frame device query allocates no garbage.
    static List<InputDevice> devices = new List<InputDevice>();

    [SerializeField]
    InputDeviceRole role;

    // Update is called once per frame
    void Update()
    {
        InputDevices.GetDevicesWithRole(role, devices);
        if (devices.Count == 0)
            return;

        InputDevice device = devices[0];

        Vector3 position;
        if (device.TryGetFeatureValue(CommonUsages.devicePosition, out position))
            transform.position = position;

        Quaternion rotation;
        if (device.TryGetFeatureValue(CommonUsages.deviceRotation, out rotation))
            transform.rotation = rotation;
    }
}
However, I would suggest using the TrackedPoseDriver, which is part of the XR Legacy Input Helpers package, because there are some quirks to XR that aren't being picked up by this kind of simple tracker. For one, we actually update the tracking two different times in the frame: once before update, and once right before we send things off for rendering. This second one is crucial, as it prevents that feeling of your virtual hands and head feeling like they are lagging just a little bit behind the real world equivalents. As well, the new XR.InputDevice APIs have a second benefit of letting you use events and persistent InputDevice structs to retain the device you are using. This lets you avoid redoing work in finding the same device repeatedly, and makes it easier to work with devices that you can have more than one of (for example hardware trackers), where getting the local position or rotation only works for the first one registered.
That said, here is a slightly more robust XRTracker I whipped up using XR.InputDevice APIs:
/// <summary>
/// Tracks this transform to the first connected XR device matching `role`,
/// updating both in Update and right before render to minimize perceived
/// latency. Requires InputDevices.deviceConnected/deviceDisconnected
/// (available from Unity 2019.2).
/// </summary>
public class XRTracker : MonoBehaviour
{
    //Keep this around to avoid creating heap garbage
    static List<InputDevice> devices = new List<InputDevice>();

    [SerializeField]
    InputDeviceRole role;

    // The device we are currently following; default (invalid) when none.
    InputDevice trackedDevice;

    void OnEnable()
    {
        InputDevices.deviceConnected += OnDeviceConnected;
        // BUGFIX: the disconnect handler was subscribed to deviceConnected,
        // so OnDeviceDisconnected ran on *connect* and real disconnects were
        // never observed.
        InputDevices.deviceDisconnected += OnDeviceDisconnected;
        Application.onBeforeRender += OnBeforeRender;

        // Adopt a device that was already connected before we were enabled.
        InputDevices.GetDevicesWithRole(role, devices);
        if (devices.Count > 0)
            OnDeviceConnected(devices[0]);
    }

    void OnDisable()
    {
        InputDevices.deviceConnected -= OnDeviceConnected;
        // BUGFIX: unsubscribe from the event we actually subscribed to.
        InputDevices.deviceDisconnected -= OnDeviceDisconnected;
        Application.onBeforeRender -= OnBeforeRender;
    }

    void Update()
    {
        if (trackedDevice.isValid)
            TrackToDevice(trackedDevice);
    }

    // Latch onto the first matching device; ignore others while one is held.
    void OnDeviceConnected(InputDevice device)
    {
        if (!trackedDevice.isValid && device.role == role)
            trackedDevice = device;
    }

    // Drop our reference when the held device goes away.
    void OnDeviceDisconnected(InputDevice device)
    {
        if (device == trackedDevice)
            trackedDevice = new InputDevice();
    }

    // Re-track just before rendering so the pose doesn't lag a frame behind.
    void OnBeforeRender()
    {
        if (trackedDevice.isValid)
            TrackToDevice(trackedDevice);
    }

    void TrackToDevice(InputDevice trackedDevice)
    {
        Vector3 position;
        if (trackedDevice.TryGetFeatureValue(CommonUsages.devicePosition, out position))
            this.transform.position = position;
        Quaternion rotation;
        if (trackedDevice.TryGetFeatureValue(CommonUsages.deviceRotation, out rotation))
            this.transform.rotation = rotation;
    }
}
That said, I’d still suggest going with the ready-made TrackedPoseDriver mentioned above. It does all this and exposes proper options and settings, and is something we are committed to maintaining and upgrading as new features become available.
Hope that helps
Thank-you for the examples and the quick response!
I tested the examples with Unity 2019.1.1f1 and Unity 2019.2.0a4, The first example works and I can go with that, but the second example produces the following error:
Assets\Scripts\XRTracker.cs(25,22): error CS0117: ‘InputDevices’ does not contain a definition for ‘deviceConnected’
So this looks like it’s something very new and not available yet.
I’ll check out the TrackedPoseDriver later today. I avoided the XR Legacy Input Helpers package because, you know… it’s legacy.
[Update]: Tried the TrackedPoseDriver… it works fine but feels inconsistent with the naming conventions of the InputDevice APIs. For now, I’m going to go with a hybrid of your two examples:
/// <summary>
/// Hybrid tracker: caches the first device matching `role`, re-acquires it
/// when invalid, and applies the pose both in Update and just before render.
/// </summary>
public class BasicXRTracker : MonoBehaviour
{
    // Shared scratch list so device queries create no heap garbage.
    static List<InputDevice> devices = new List<InputDevice>();

    [SerializeField]
    InputDeviceRole role;

    // Cached device we are following; default (invalid) until acquired.
    InputDevice trackedDevice;

    void OnEnable()
    {
        Application.onBeforeRender += OnBeforeRender;
        GetDevice();
    }

    void OnDisable()
    {
        Application.onBeforeRender -= OnBeforeRender;
    }

    void Update()
    {
        // Track while valid; otherwise try to re-acquire for next frame.
        if (trackedDevice.isValid)
        {
            TrackToDevice(trackedDevice);
            return;
        }
        GetDevice();
    }

    void OnBeforeRender()
    {
        if (trackedDevice.isValid)
            TrackToDevice(trackedDevice);
    }

    // Caches the first connected device matching the configured role.
    void GetDevice()
    {
        InputDevices.GetDevicesWithRole(role, devices);
        if (devices.Count == 0)
            return;
        trackedDevice = devices[0];
    }

    void TrackToDevice(InputDevice device)
    {
        Vector3 position;
        if (device.TryGetFeatureValue(CommonUsages.devicePosition, out position))
            transform.localPosition = position;

        Quaternion rotation;
        if (device.TryGetFeatureValue(CommonUsages.deviceRotation, out rotation))
            transform.localRotation = rotation;
    }
}
@addent , thanks for asking and @StayTalm_Unity , thanks for providing an answer regarding the new APIs. This is the type of support that really makes a difference on the forums. I would also suggest adding this to the docs.
If it’s not too much to ask, might I tempt you into providing a TrackedPoseDriver “best practice” usage example as well? It is immensely helpful for us “users” to see the API applied by those who made it, just to understand the intended usage patterns.
@StayTalm_Unity I got around to checking the TrackedPoseDriver class now. I was surprised that I had to enable a package called “XR Legacy Input Helpers” for it to be available. I understood from your post above that the TrackedPoseDriver is in fact what we should use going forward, so why does the package have “Legacy” in its name?
@addent
As far as InputDevices.deviceConnected, you are right. It shipped in 2019.2.0a6. Your solution should work most of the time, although the edge case would be if someone disconnects one controller (say, a Vive) and reconnected something else (Maybe a knuckles controller). It would lose the InputDevice reference and stop tracking. You may want to just check if the device is valid at the top of the update functions and if not, do a quick search for any valid device.
@plmx
That naming one is complicated, and political. We assumed a few things would ship and replace older systems a little faster than they did. We are hoping to upgrade it to use InputDevices APIs, and the new Input System as well, but some of this is just incoming.
As for Best Practices, I’ve also put that on the backlog. It’s a good idea, not a lot of people know about the TrackedPoseDriver, and so we are going to get better exposure and information on it.
I’ve submitted a bug report as well, but I thought I’d mention it here too…
The InputDevice.TryGetFeatureUsages() reports incorrect information with an HTC Vive.
In particular, it reports that the various “Finger” inputs exists when they do not.
Ideally this would be fixed to be accurate. I’m trying to avoid writing code specific to each manufacturer’s controllers and the TryGetFeatureUsages() would have been a nice solution to achieve it.
An example of incorrect reporting would be:
Device: OpenVR Controller(Vive. Controller MV) - Right
Role: RightHanded
Features:
Primary2DAxis
Trigger
Grip
Secondary2DAxis ← Not True! This feature does not exist on Vive Controllers!
IndexFinger ← Not True! This feature does not exist on Vive Controllers!
MiddleFinger ← Not True! This feature does not exist on Vive Controllers!
RingFinger ← Not True! This feature does not exist on Vive Controllers!
PinkyFinger ← Not True! This feature does not exist on Vive Controllers!
PrimaryButton
SecondaryButton
GripButton
Primary2DAxisClick
TriggerButton
Primary2DAxisTouch
DevicePosition
DeviceRotation
DeviceVelocity
DeviceAngularVelocity
TrackingState
IsTracked
The script I used to test what it was reporting is:
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.XR;
/// <summary>
/// Diagnostic: every frame, logs each connected XR device's name, role, and
/// the input feature usages it reports via TryGetFeatureUsages.
/// </summary>
public class XRFeatureCheck : MonoBehaviour
{
    // Shared scratch lists to avoid per-frame allocations.
    static List<InputDevice> devices = new List<InputDevice>();
    static List<InputFeatureUsage> featureUsages = new List<InputFeatureUsage>();

    void Update()
    {
        InputDevices.GetDevices(devices);

        // PERF: StringBuilder replaces repeated string += in a loop that runs
        // every frame (each += allocated a whole new string: O(n^2)).
        var log = new System.Text.StringBuilder();
        foreach (var device in devices)
        {
            log.Append("\n\n-------------------------------------------------------");
            log.Append("\nDevice: ").Append(device.name);
            log.Append("\nRole: ").Append(device.role);
            log.Append("\nFeatures: ");
            if (!device.TryGetFeatureUsages(featureUsages))
            {
                log.Append("\n No Features Found.");
            }
            else
            {
                foreach (var f in featureUsages)
                {
                    log.Append("\n ").Append(f.name);
                }
            }
        }
        Debug.Log(log.ToString());
    }
}
@addent
That is an unfortunate result of how OpenVR handles buttons and axes. Some of the controllers will use axes reported as not in use, and there is no way to know which buttons will be used by any given controller. I didn’t want to attempt to inject controller-specific logic (e.g. if(ViveWand)… else if(WMRController)… else if(OculusTouch)) into an otherwise generic interface (openVR), as that would make Unity responsible for implementing each controller that decides to work with OpenVR.
As a result, we blast out all potential features, and so every controller used in OpenVR will have the full list of potential input features.