How to use the rlbot.agents.base_agent.BaseAgent class in rlbot

To help you get started, we've selected a few rlbot examples that show popular ways BaseAgent is used in public projects.

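Every bot in the rlbot framework follows the same shape: subclass BaseAgent, optionally override initialize_agent for one-time setup, and override get_output, which receives a GameTickPacket each tick and returns a SimpleControllerState. As a baseline before the real-world examples below, here is a minimal sketch of that pattern (not taken from any one project):

from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket


class MinimalBot(BaseAgent):

    def initialize_agent(self):
        # Runs once, after the framework has set name, team and index on the agent.
        self.controller_state = SimpleControllerState()

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        # Runs every tick; this trivial bot just holds full throttle.
        self.controller_state.throttle = 1.0
        return self.controller_state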

From SaltieRL/Saltie, agents/torch_model/torch_model.py (view on GitHub):

import os
import sys
from rlbot.agents.base_agent import SimpleControllerState, BaseAgent, BOT_CONFIG_AGENT_HEADER
from rlbot.parsing.custom_config import ConfigHeader, ConfigObject

path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, path)  # this is for first process imports

from examples.levi.output_formatter import LeviOutputFormatter
from examples.levi.input_formatter import LeviInputFormatter


class TorchModelAgent(BaseAgent):
    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        sys.path.insert(0, path)  # this is for separate process imports
        import torch
        self.torch = torch
        self.empty_controller = SimpleControllerState()
        self.model_path = None
        self.model = None
        self.input_formatter = None
        self.output_formatter = None

    def load_config(self, config_object_header: ConfigHeader):
        self.model_path = config_object_header.get('model_path')

    def initialize_agent(self):
        self.model = self.get_model()
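The load_config override reads a custom 'model_path' value, which only exists if the agent registers it with the framework. In rlbot that registration is done by overriding create_agent_configurations and adding values under BOT_CONFIG_AGENT_HEADER. The snippet is truncated before that part, so here is a hedged sketch of what the registration typically looks like (the default and description are invented, not Saltie's actual values):

    @staticmethod
    def create_agent_configurations(config: ConfigObject):
        # Register the custom key so load_config can later read it from the bot's .cfg file.
        params = config.get_header(BOT_CONFIG_AGENT_HEADER)
        params.add_value('model_path', str, default=None,
                         description='Path to the torch model file to load')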
From SaltieRL/Saltie, swarm-trainer_hytak/online_training_agent.py (view on GitHub):
import os

from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.botmanager.helper_process_request import HelperProcessRequest


class OnlineTrainingAgent(BaseAgent):
    def __init__(self, name, team, index):
        from leviathan.output_formatter import OutputFormatter
        from leviathan.input_formatter import InputFormatter
        from leviathan.cool_atba import Atba
        import torch

        super().__init__(name, team, index)
        self.pipe = None
        self.actor_model = None
        self.team_model = None
        self.game_memory = None
        self.atba = Atba()
        self.torch = torch
        self.output_formatter = OutputFormatter()
        self.input_formatter = InputFormatter(self.index, self.index)
        # self.input_formatter = InputFormatter(self.index, (self.index + 1) % 2)
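The HelperProcessRequest import points at the other half of this agent: rlbot lets a bot ask the framework to spawn a shared helper process (presumably the trainer on the far end of self.pipe) by overriding get_helper_process_request. A rough sketch of that hook, with the file name and key invented for illustration:

    def get_helper_process_request(self) -> HelperProcessRequest:
        # Bots that request the same key share a single helper process.
        # 'trainer.py' and the key string are placeholders, not Saltie's real values.
        file_path = os.path.join(os.path.dirname(__file__), 'trainer.py')
        return HelperProcessRequest(file_path, 'online-training-helper')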
From samuelpmish/ExampleBots, 4_Aerial/agent.py (view on GitHub):
import math
import random

from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3, Rotator

from RLUtilities.GameInfo import GameInfo
from RLUtilities.Simulation import Car, Ball, Input
from RLUtilities.LinearAlgebra import vec3, dot

from RLUtilities.Maneuvers import Aerial

class Agent(BaseAgent):

    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        self.info = GameInfo(index, team)
        self.controls = SimpleControllerState()

        self.skip = False
        self.timer = 0.0
        self.action = None
        self.car_predictions = []
        self.ball_predictions = []

        self.csign = 1
        self.bsign = 1

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
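The snippet stops at the get_output signature, but the Aerial import already reveals the structure: RLUtilities wraps the aerial in a maneuver object that is constructed once, then stepped every tick until it finishes. A sketch of that usage pattern inside get_output (the target and timing are invented, and the constructor signature may vary between RLUtilities versions):

        self.info.read_packet(packet)

        # Start an aerial toward a placeholder target, arriving two seconds from now.
        if self.action is None:
            self.action = Aerial(self.info.my_car, vec3(0, 0, 1000), self.info.time + 2.0)

        # Advance the maneuver by one tick and copy out its controls.
        self.action.step(1.0 / 60.0)
        self.controls = self.action.controls
        return self.controls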
From SaltieRL/Saltie, agents/self_evolving_car/train.py (view on GitHub):
import math
import sys
import os
import matplotlib.pyplot as plt
from rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3, Rotator, GameInfoState
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from examples.levi.input_formatter import LeviInputFormatter
from examples.levi.output_formatter import LeviOutputFormatter
from framework.self_evolving_car.genetic_algorithm import GeneticAlgorithm


class SelfEvolvingCar(BaseAgent):
    """This agent uses neuro-evolution to train the Levi torch model to perform aerials in Rocket League.
        first, the algorithm runs each model with randomly generated parameters. It then calculates each bots' fitness
        by determining it's minimum distance to the ball. Next, it clones the bot with the best fitness to the rest of
        the network, and uses a mutation function to guarantee diversity in the population. Make sure to change match
        length and points to unlimited and disable goal reset and enable instant start"""

    def __init__(self, name, team, index):
        super().__init__(name, team, index)

        import torch
        sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))  # this is for separate process imports
        from examples.levi.torch_model import SymmetricModel
        self.ga = GeneticAlgorithm()
        self.Model = SymmetricModel
        self.torch = torch
        self.controller_state = SimpleControllerState()
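The select-clone-mutate loop described in the docstring is straightforward to express with torch tensors. This is a generic sketch of one generation step, not the repo's GeneticAlgorithm implementation (fitness is assumed to be higher-is-better, e.g. the negated minimum car-to-ball distance):

import torch

def next_generation(populations, fitnesses, mutation_std=0.1):
    # populations: one flat parameter tensor per bot; fitnesses: one float per bot.
    best = populations[max(range(len(fitnesses)), key=fitnesses.__getitem__)]
    # Clone the fittest parameters to every bot, then add Gaussian noise for diversity.
    return [best + torch.randn_like(best) * mutation_std for _ in populations]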
From RLBot/RLBotPythonExample, src/bot.py (view on GitHub):
import math

from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket

from util.orientation import Orientation
from util.vec import Vec3


class MyBot(BaseAgent):

    def initialize_agent(self):
        # This runs once before the bot starts up
        self.controller_state = SimpleControllerState()

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        ball_location = Vec3(packet.game_ball.physics.location)

        my_car = packet.game_cars[self.index]
        car_location = Vec3(my_car.physics.location)

        car_to_ball = ball_location - car_location

        # Find the direction of our car using the Orientation class
        car_orientation = Orientation(my_car.physics.rotation)
        car_direction = car_orientation.forward
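From here the stock example converts car_direction and car_to_ball into a steering correction. The repository does this in a small helper; the condensed sketch below inlines the same idea (the gain of 3.0 is illustrative, and the steer sign may need flipping depending on your coordinate conventions):

        # Signed angle from the car's facing direction to the ball, in the ground plane.
        angle = math.atan2(car_to_ball.y, car_to_ball.x) - math.atan2(car_direction.y, car_direction.x)
        # Wrap into [-pi, pi] so the car turns the short way around.
        angle = (angle + math.pi) % (2 * math.pi) - math.pi
        self.controller_state.steer = max(-1.0, min(1.0, 3.0 * angle))
        self.controller_state.throttle = 1.0
        return self.controller_state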
From samuelpmish/ExampleBots, 1_ATBA/agent.py (view on GitHub):
import math

from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket

from RLUtilities.GameInfo import GameInfo
from RLUtilities.Simulation import Car, Ball
from RLUtilities.LinearAlgebra import vec3, dot, clip

from RLUtilities.controller_input import controller

class Agent(BaseAgent):

    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        self.info = GameInfo(index, team)
        self.controls = SimpleControllerState()

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        self.info.read_packet(packet)

        ball = self.info.ball
        car = self.info.my_car

        # the vector from the car to the ball in local coordinates:
        # delta_local[0]: how far in front of my car
        # delta_local[1]: how far to the left of my car
        # delta_local[2]: how far above my car
        delta_local = dot(ball.pos - car.pos, car.theta)
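With the ball already in local coordinates, turn-toward-the-ball steering reduces to the yaw angle of delta_local. One plausible continuation of the snippet, using the clip helper it imports (the original file's exact gain and sign may differ):

        # Yaw angle toward the ball: zero means the ball is dead ahead.
        phi = math.atan2(delta_local[1], delta_local[0])

        # Proportional steering; flip the sign if the bot turns away from the ball.
        self.controls.steer = clip(2.5 * phi, -1.0, 1.0)
        self.controls.throttle = 1.0

        return self.controls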
From Darxeal/BotimusPrime, collector.py (view on GitHub):
from rlbot.agents.base_agent import BaseAgent, GameTickPacket, SimpleControllerState
from rlutilities.simulation import Input
from rlutilities.linear_algebra import rotation_to_euler, vec3
from utils.game_info import GameInfo
from pathlib import Path
from csv import DictWriter
from maneuvers.driving.drive import Drive


class Collector(BaseAgent):
    
    filename = "data/powerslide_1500.csv"

    def initialize_agent(self):
        self.info = GameInfo(self.index, self.team)
        self.controls: Input = Input()
        self.ticks = 0
        self.fieldnames = [
            "time",
            "ball_loc_x",
            "ball_loc_y",
            "ball_loc_z",
            "ball_vel_x",
            "ball_vel_y",
            "ball_vel_z",
            "ball_ang_x",
From SaltieRL/Saltie, agents/swarm/teacher_agent.py (view on GitHub):
    def load_config(self, config_object_header: ConfigHeader):
        super().load_config(config_object_header)
        teacher_path = config_object_header.get('teacher_path')
        self.teacher = ExternalClassWrapper(os.path.join(get_repo_directory(), teacher_path),
                                            BaseAgent).get_loaded_class()(self.name, self.team, self.index)
From samuelpmish/ExampleBots, 2_Aerial_Recovery/agent.py (view on GitHub):
import math
import random

from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
from rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3, Rotator

from RLUtilities.GameInfo import GameInfo
from RLUtilities.Simulation import Car, Ball
from RLUtilities.LinearAlgebra import vec3, dot

from RLUtilities.Maneuvers import AerialTurn

class Agent(BaseAgent):

    def __init__(self, name, team, index):
        super().__init__(name, team, index)
        self.info = GameInfo(index, team)
        self.controls = SimpleControllerState()

        self.timer = 0.0
        self.action = None

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        self.info.read_packet(packet)
        self.controls = SimpleControllerState()

        if self.timer < 0.05:

            position = Vector3(random.uniform(-4000, 4000),
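The snippet cuts off mid-expression, but the timer gate and the game_state_util imports give the technique away: the bot periodically teleports its car to a random pose and practices recovering in the air. A hedged sketch of the state-setting call those imports support (all values invented):

            # Teleport this bot's car to a random airborne pose and let it recover.
            position = Vector3(random.uniform(-4000, 4000), random.uniform(-4000, 4000), 1000)
            car_state = CarState(physics=Physics(location=position,
                                                 velocity=Vector3(0, 0, 500),
                                                 rotation=Rotator(0, 0, 0)))
            self.set_game_state(GameState(cars={self.index: car_state}))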