How to use the torchx.layers.LayerNorm function in torchx

To help you get started, we've selected a couple of torchx examples based on how torchx.layers.LayerNorm is used in public projects.
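Before the full examples, here is a minimal sketch of the pattern they share. It assumes torchx.layers is imported as L (the import is not shown in the excerpts below); the Placeholder/Functional API and the LayerNorm(1) call, which normalizes over one trailing dimension, are inferred from those excerpts.

import torchx.layers as L  # assumed import; the excerpts below alias torchx.layers as L

# A small MLP block: Linear -> ReLU -> LayerNorm over the feature axis
x_input = L.Placeholder((None, 64))   # batch of 64-dimensional vectors
x = L.Linear(32)(x_input)
x = L.ReLU()(x)
x = L.LayerNorm(1)(x)                 # normalize over 1 trailing dimension

model = L.Functional(inputs=x_input, outputs=x)
model.build((None, 64))               # resolve shapes before use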


From SurrealAI/surreal, surreal/model/model_builders/builders.py (ActorNetworkX):
def __init__(self, D_in, D_act, hidden_sizes=[300, 200], use_layernorm=True):
        super(ActorNetworkX, self).__init__()

        # L is torchx.layers; the actor is assembled with the functional API
        xp_input = L.Placeholder((None, D_in))
        xp = L.Linear(hidden_sizes[0])(xp_input)
        xp = L.ReLU()(xp)
        if use_layernorm:
            # normalize over 1 dimension (the feature axis)
            xp = L.LayerNorm(1)(xp)
        xp = L.Linear(hidden_sizes[1])(xp)
        xp = L.ReLU()(xp)
        if use_layernorm:
            xp = L.LayerNorm(1)(xp)
        xp = L.Linear(D_act)(xp)
        xp = L.Tanh()(xp)  # squash actions into [-1, 1]

        self.model = L.Functional(inputs=xp_input, outputs=xp)
        self.model.build((None, D_in))
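A hypothetical instantiation of the actor above (the dimensions are made up for illustration, and ActorNetworkX's base class is not shown in the excerpt):

actor = ActorNetworkX(D_in=17, D_act=6, use_layernorm=True)
# actor.model maps a (batch, 17) observation to a (batch, 6) action in [-1, 1]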
From SurrealAI/surreal, surreal/model/model_builders/builders.py (CriticNetworkX):
def __init__(self, D_in, D_act, hidden_sizes=[400, 300], use_layernorm=True):
        super(CriticNetworkX, self).__init__()

        # Stage 1: encode the observation alone
        xp_input_obs = L.Placeholder((None, D_in))
        xp = L.Linear(hidden_sizes[0])(xp_input_obs)
        xp = L.ReLU()(xp)
        if use_layernorm:
            xp = L.LayerNorm(1)(xp)
        self.model_obs = L.Functional(inputs=xp_input_obs, outputs=xp)
        self.model_obs.build((None, D_in))

        # Stage 2: score the encoded observation concatenated with the action
        xp_input_concat = L.Placeholder((None, hidden_sizes[0] + D_act))
        xp = L.Linear(hidden_sizes[1])(xp_input_concat)
        xp = L.ReLU()(xp)
        if use_layernorm:
            xp = L.LayerNorm(1)(xp)
        xp = L.Linear(1)(xp)  # scalar Q-value

        self.model_concat = L.Functional(inputs=xp_input_concat, outputs=xp)
        self.model_concat.build((None, D_act + hidden_sizes[0]))
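The critic is built in two stages: model_obs encodes the observation, and model_concat scores that encoding concatenated with the action. The forward method is not part of the excerpt; the sketch below shows how the two sub-models would plausibly compose given the shapes above, assuming standard torch.cat semantics.

import torch

def forward(self, obs, action):
    h = self.model_obs(obs)             # (batch, hidden_sizes[0])
    xs = torch.cat([h, action], dim=1)  # (batch, hidden_sizes[0] + D_act)
    return self.model_concat(xs)        # (batch, 1) Q-value estimate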