public override void _Ready()
{
    // Walk up the scene tree until an RLAgent ancestor is found
    // (or the search reaches /root without finding one).
    Node n = this;
    Node rootNode = GetNode("/root");
    while (agent == null && n != rootNode)
    {
        Node parent = n.GetParent();
        n = parent;
        agent = parent as RLAgent;
    }
    if (agent == null)
    {
        // Guard against a null dereference below when no RLAgent ancestor exists.
        GD.PushError("No RLAgent ancestor found for node " + Name + "!");
        return;
    }
    if (resettable)
    {
        agent.AddResetListener(this);
    }
    if (usePathAsID)
    {
        this.id = GetPath();
    }
    else
    {
        this.id = Name;
    }
    OnCreate();
}
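// The parent walk above is a common Godot pattern for locating a component on
// an ancestor node. Purely as an illustration (FindAncestor is a hypothetical
// helper, not part of this codebase), the same search can be factored into a
// reusable extension method:

using Godot;

public static class NodeExtensions
{
    // Walk up the scene tree from 'start' and return the nearest ancestor
    // of type T, or null if the root is reached without a match.
    public static T FindAncestor<T>(this Node start) where T : class
    {
        Node n = start.GetParent();
        while (n != null)
        {
            if (n is T match)
            {
                return match;
            }
            n = n.GetParent();
        }
        return null;
    }
}

// With such a helper, the loop in _Ready would reduce to
// agent = this.FindAncestor<RLAgent>(); followed by a null check.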
public void Start()
{
    if (target == null)
    {
        Debug.LogWarning("ForwardTargetReward error: target is not specified!");
    }
    agent = GetComponent<RLAgent>();
    agent.AddResetListener(this);
    minDist = -1; // Sentinel: no distance has been measured yet.
}
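// Every initializer in this file registers itself with the agent through
// AddResetListener. As a sketch only (the interface name IAgentResetListener
// and the OnReset signature are assumptions, not confirmed by this code),
// the listener side of that contract might look like:

using UnityEngine;

public interface IAgentResetListener
{
    void OnReset(RLAgent agent);
}

public class ExampleResetListener : MonoBehaviour, IAgentResetListener
{
    private RLAgent agent;

    void Start()
    {
        agent = GetComponent<RLAgent>();
        agent.AddResetListener(this); // Ask to be notified on episode reset.
    }

    // Hypothetical callback invoked by the agent when an episode restarts;
    // per-episode state (distances, reward sums, flags) is cleared here.
    public void OnReset(RLAgent agent)
    {
    }
}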
void Start()
{
    // Build a per-axis mask: a component of 0 drops that axis, 1 keeps it.
    fx = ignoreX ? 0 : 1;
    fy = ignoreY ? 0 : 1;
    fz = ignoreZ ? 0 : 1;
    f = new Vector3(fx, fy, fz);

    // Validate configuration before allocating, so the warning is logged
    // even if an invalid 'samples' value would make the allocation fail.
    if (samples <= 0)
    {
        Debug.LogWarning("Warning: the field 'samples' must be greater than 0!");
    }
    if (target == null)
    {
        Debug.LogWarning("Warning: target is not specified for the reward function AutoApproximationReward.");
    }

    // Ring buffer over the last 'samples' measurements.
    hist = new float[samples];
    head = 0;
    len = 0;
    totalReceivedReward = 0.0f;

    agent = GetComponent<RLAgent>();
    agent.AddResetListener(this);
    prevPosition = transform.localPosition;
}
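// The hist/head/len trio above implies a fixed-size ring buffer over the
// last 'samples' measurements. How the buffer is consumed is not shown here,
// so the following is only a sketch; PushSample and Average are hypothetical
// names, not methods from this codebase.

void PushSample(float value)
{
    hist[head] = value;          // Overwrite the oldest slot.
    head = (head + 1) % samples; // Advance the write cursor, wrapping around.
    if (len < samples)
    {
        len++;                   // Track how many slots are filled so far.
    }
}

float Average()
{
    if (len == 0)
    {
        return 0.0f; // No samples recorded yet.
    }
    float sum = 0.0f;
    for (int i = 0; i < len; i++)
    {
        sum += hist[i];
    }
    return sum / len;
}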
// Start is called before the first frame update
void Start()
{
    // Per-axis mask: 0 ignores the axis, 1 keeps it.
    fx = ignoreX ? 0 : 1;
    fy = ignoreY ? 0 : 1;
    fz = ignoreZ ? 0 : 1;
    f = new Vector3(fx, fy, fz);

    prevPosition = transform.localPosition;
    agent = GetComponent<RLAgent>();
    agent.AddResetListener(this);
    sumOfRewards = 0;
}
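// The vector f built above acts as a per-axis mask. A typical way to apply
// it is a component-wise multiply, so ignored axes contribute nothing to the
// measured displacement. Sketch only; the method name and the use of the
// magnitude as the measured quantity are assumptions:

float MaskedDisplacement()
{
    Vector3 delta = transform.localPosition - prevPosition;
    Vector3 masked = Vector3.Scale(delta, f); // Zeroed axes drop out.
    prevPosition = transform.localPosition;
    return masked.magnitude;
}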
void Start()
{
    fall = false;
    agent = GetComponent<RLAgent>();
    agent.AddResetListener(this);
}