Skip to content

pendulum

run_pendulum_control_cbf_clf = partial(run_pendulum_experiment, controller_class=PendulumCBFCLFDirect, plotfile='data/plots/run_pendulum_control_cbf_clf{suffix}.pdf', theta0=5 * math.pi / 12, tau=0.002, numSteps=15000) module-attribute

Run pendulum with a safe CLF-CBF controller.

run_pendulum_control_online_learning = partial(run_pendulum_experiment, plotfile='data/plots/run_pendulum_control_online_learning{suffix}.pdf', controller_class=ControlPendulumCBFLearned, numSteps=250, theta0=7 * math.pi / 12, tau=0.002, dtype=torch.float64) module-attribute

Run safe pendulum control while learning the parameters online

run_pendulum_control_trival = partial(run_pendulum_experiment, controller_class=ControlTrivial, plotfile='data/plots/run_pendulum_control_trival{suffix}.pdf') module-attribute

Run pendulum with a trivial controller.

ControlCBFCLFGroundTruth

Bases: ControlPendulumCBFLearned

Controller that avoids learning but uses the ground truth model

Source code in bayes_cbf/pendulum.py
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
class ControlCBFCLFGroundTruth(ControlPendulumCBFLearned):
    """
    Controller that skips online learning and relies on the ground-truth
    pendulum dynamics model instead.
    """
    # This subclass supplies the ground-truth model itself, so the driver
    # does not need to inject one.
    needs_ground_truth = False

    def __init__(self, *args, **kwargs):
        # Callers must not set the flag themselves — this class owns it.
        assert kwargs.pop("use_ground_truth_model", False) is False
        super().__init__(*args,
                         use_ground_truth_model=True,
                         mean_dynamics_model_class=PendulumDynamicsModel,
                         **kwargs)

control_QP_cbf_clf(x, ctrl_aff_constraints, constraint_margin_weights=[])

Parameters:

Name Type Description Default
A_cbfs

A tuple of CBF functions

required
b_cbfs

A tuple of CBF functions

required
constraint_margin_weights

Add a margin constant to the constraint that is maximized.

[]
Source code in bayes_cbf/pendulum.py
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
def control_QP_cbf_clf(x,
                       ctrl_aff_constraints,
                       constraint_margin_weights=None):
    """Solve a QP over the control input subject to CBF/CLF constraints.

    Minimizes ``uᵀu + ρᵀ diag(w) ρ`` subject to ``A(x) u - ρ ≤ b(x)``,
    where slack variables ρ relax only the first
    ``len(constraint_margin_weights)`` constraints.

    Args:
          x: Current state (torch tensor); each constraint's ``A(x)`` and
             ``b(x)`` are evaluated at this state.
          ctrl_aff_constraints: Iterable of control-affine constraint
             objects exposing ``A(x)`` and ``b(x)`` (torch tensors) such
             that the constraint reads ``A(x) @ u ≤ b(x)``.
          constraint_margin_weights: Penalty weights for the slack
             variables that relax the leading constraints (e.g. the CLF
             constraint). Defaults to no slack.

    Returns:
        Torch tensor of shape ``(D_u,)``: the optimal control input,
        cast to ``x.dtype``.

    Raises:
        RuntimeError: If the QP is infeasible.
    """
    if constraint_margin_weights is None:
        constraint_margin_weights = []
    A_total = np.vstack([af.A(x).detach().numpy()
                         for af in ctrl_aff_constraints])
    b_total = np.vstack([af.b(x).detach().numpy()
                         for af in ctrl_aff_constraints]).flatten()
    D_u = A_total.shape[1]
    N_const = A_total.shape[0]

    # Augment with slack variables ρ on the first N_slack constraints:
    #
    # minimize
    #         [ u, ρ ]ᵀ [ I,       0] [ u ]
    #                   [ 0, diag(w)] [ ρ ]
    # s.t.
    #         [A_relaxed, -I] [ u, ρ ] ≤ b_relaxed
    #         [A_hard,     0] [ u, ρ ] ≤ b_hard
    N_slack = len(constraint_margin_weights)
    A_total_rho = np.hstack(
        (A_total,
         np.vstack((-np.eye(N_slack),
                    np.zeros((N_const - N_slack, N_slack))))
        ))
    P_rho = np.eye(D_u + N_slack)
    P_rho[D_u:, D_u:] = np.diag(constraint_margin_weights)
    q_rho = np.zeros(P_rho.shape[0])
    u_rho = cvxopt_solve_qp(P_rho.astype(np.float64),
                            q_rho.astype(np.float64),
                            G=A_total_rho.astype(np.float64),
                            h=b_total.astype(np.float64),
                            show_progress=False,
                            maxiters=1000)
    if u_rho is None:
        raise RuntimeError("""QP is infeasible
        minimize
        u_rhoᵀ {P_rho} u_rho
        s.t.
        {A_total_rho} u_rho ≤ {b_total}""".format(
            P_rho=P_rho,
            A_total_rho=A_total_rho, b_total=b_total))
    # Sanity check: the solution satisfies every constraint up to an
    # absolute or relative tolerance of 1e-2.
    constraint = A_total_rho @ u_rho - b_total
    assert np.all((constraint <= 1e-2) | (constraint / np.abs(b_total) <= 1e-2))
    # Drop the slack components; return only the control input.
    return torch.from_numpy(u_rho[:D_u]).to(dtype=x.dtype)