How to use the lightdock.mathutil.cython.quaternion.Quaternion class in lightdock

To help you get started, we’ve selected a few lightdock examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github brianjimenez / lightdock / bin / simulation / relightdock.py View on Github external
neighbors = []  # NOTE(review): fragment — the enclosing def line (and `translations`/`rotations` init) is missing from this excerpt
    vision_range = []
    scoring = []

    # Read the whole LightDock GSO output file into memory, stripping line separators.
    data_file = open(lightdock_output)
    lines = [line.rstrip(os.linesep) for line in data_file.readlines()]
    data_file.close()

    counter = 0
    for line in lines:
        # Coordinate lines start with a parenthesized tuple: "(tx, ty, tz, qw, qx, qy, qz)"
        if line[0] == '(':
            counter += 1
            last = line.index(')')
            coord = line[1:last].split(',')
            # First three components: translation vector
            translations.append([float(coord[0]), float(coord[1]), float(coord[2])])
            # Next four components: rotation quaternion (w, x, y, z)
            rotations.append(Quaternion(float(coord[3]), float(coord[4]), float(coord[5]), float(coord[6])))
            # After the closing parenthesis: whitespace-separated per-glowworm values
            values = line[last + 1:].split()
            luciferin.append(float(values[0]))
            neighbors.append(int(values[1]))
            vision_range.append(float(values[2]))
            scoring.append(float(values[3]))

    log.info("Read %s coordinate lines" % counter)
    return translations, rotations, luciferin, neighbors, vision_range, scoring
github brianjimenez / lightdock / bin / post / lgd_generate_conformations.py View on Github external
ligand_ids = []  # NOTE(review): fragment — the enclosing def line (and `translations`/`rotations`/`receptor_ids` init) is missing from this excerpt
    rec_extents = []
    lig_extents = []

    # Read the whole LightDock output file into memory.
    data_file = open(lightdock_output)
    lines = data_file.readlines()
    data_file.close()

    counter = 0
    for line in lines:
        # Coordinate lines start with a parenthesized tuple: "(tx, ty, tz, qw, qx, qy, qz[, anm...])"
        if line[0] == '(':
            counter += 1
            last = line.index(')')
            coord = line[1:last].split(',')
            # First three components: translation vector
            translations.append([float(coord[0]), float(coord[1]), float(coord[2])])
            # Next four components: rotation quaternion (w, x, y, z)
            rotations.append(Quaternion(float(coord[3]), float(coord[4]), float(coord[5]), float(coord[6])))
            # Optional trailing components: ANM extents (num_anm_rec / num_anm_lig
            # presumably come from the enclosing scope — not visible in this excerpt)
            if len(coord) > 7:
                rec_extents.append(np.array([float(x) for x in coord[7:7+num_anm_rec]]))
                lig_extents.append(np.array([float(x) for x in coord[-num_anm_lig:]]))
            # After the closing parenthesis: receptor and ligand structure indices
            raw_data = line[last+1:].split()
            receptor_id = int(raw_data[0])
            ligand_id = int(raw_data[1])
            receptor_ids.append(receptor_id)
            ligand_ids.append(ligand_id)
    log.info("Read %s coordinate lines" % counter)
    return translations, rotations, receptor_ids, ligand_ids, rec_extents, lig_extents
github brianjimenez / lightdock / lightdock / gso / searchspace / landscape.py View on Github external
def update_landscape_position(self, optimized_vector):
    """Refresh the pose from an optimization vector.

    Vector layout: [tx, ty, tz, qw, qx, qy, qz, rec_nmodes..., lig_nmodes...].
    """
    self.translation = optimized_vector[:3]
    qw, qx, qy, qz = optimized_vector[3], optimized_vector[4], optimized_vector[5], optimized_vector[6]
    self.rotation = Quaternion(qw, qx, qy, qz)
    # ANM extents are empty arrays when normal-mode analysis is disabled
    if self.num_rec_nmodes > 0:
        self.rec_extent = optimized_vector[7:7 + self.num_rec_nmodes]
    else:
        self.rec_extent = np.array([])
    if self.num_lig_nmodes > 0:
        self.lig_extent = optimized_vector[-self.num_lig_nmodes:]
    else:
        self.lig_extent = np.array([])
github brianjimenez / lightdock / lightdock / prep / poses.py View on Github external
a = normalize_vector(a)  # NOTE(review): fragment — the enclosing def line is missing from this excerpt
    b = normalize_vector(b)
    # Check for scenario where vectors point in opposite (anti-parallel) directions:
    # any axis orthogonal to a gives a valid 180-degree rotation
    if np.allclose(a, -b):
        o = orthogonal(a)
        return Quaternion(w=0., x=o[0], y=o[1], z=o[2])
    c = np.cross(a, b)
    d = np.dot(a, b)
    # NOTE(review): abs(d) keeps s away from zero for nearly anti-parallel vectors,
    # but for d < 0 this deviates from the exact two-vector quaternion
    # (s = sqrt(2*(1+d))) before normalization — confirm this is intended.
    s = np.sqrt( (1+abs(d))*2 )
    invs = 1. / s
    # Axis from the cross product, half-angle magnitude encoded in w
    x = c[0] * invs
    y = c[1] * invs
    z = c[2] * invs
    w = s * 0.5

    return Quaternion(w=w, x=x, y=y, z=z).normalize()
github brianjimenez / lightdock / lightdock / prep / poses.py View on Github external
def quaternion_from_vectors(a, b):
    """Return the Quaternion rotating vector a onto vector b."""
    a = normalize_vector(a)
    b = normalize_vector(b)
    # Anti-parallel vectors: any axis orthogonal to a yields a 180-degree rotation
    if np.allclose(a, -b):
        axis = orthogonal(a)
        return Quaternion(w=0., x=axis[0], y=axis[1], z=axis[2])
    cross = np.cross(a, b)
    dot = np.dot(a, b)
    # NOTE(review): abs(dot) avoids s -> 0 near anti-parallel inputs, but differs
    # from the exact half-angle formula sqrt(2*(1+dot)) when dot < 0 — confirm intended.
    scale = np.sqrt( (1+abs(dot))*2 )
    inv_scale = 1. / scale
    # Rotation axis comes from the cross product; w encodes the half angle
    return Quaternion(w=scale * 0.5,
                      x=cross[0] * inv_scale,
                      y=cross[1] * inv_scale,
                      z=cross[2] * inv_scale).normalize()
github brianjimenez / lightdock / lightdock / prep / poses.py View on Github external
# NOTE(review): fragment of a pose-generation loop — tx/ty/tz, restraint lists,
# number_generator, rng_nm, rec_nm/lig_nm and new_poses are defined above this excerpt.
# Only restraints in the ligand partner
        elif ligand_restraints and not receptor_restraints:
            # The strategy is similar to previous but for the receptor side we will use a simulated point
            # over the receptor surface to point out the quaternion
            coef = norm(center) / ligand_diameter
            # It is important to keep the coordinates as in the original complex without
            # moving to the center of coordinates (applying translation)
            rec_residue = Residue.dummy(center[0]*coef-rec_translation[0], 
                                        center[1]*coef-rec_translation[1], 
                                        center[2]*coef-rec_translation[2])
            # Pick one ligand restraint at random to orient the pose towards
            lig_residue = ligand_restraints[number_generator.randint(0, len(ligand_restraints)-1)]
            q = get_quaternion_for_restraint(rec_residue, lig_residue, tx, ty, tz,
                                             rec_translation, lig_translation)
        # No restraints at all
        else:
            q = Quaternion.random(number_generator)

        # Glowworm's optimization vector: [tx, ty, tz, qw, qx, qy, qz, anm...]
        op_vector = [tx, ty, tz, q.w, q.x, q.y, q.z]

        # If ANM is enabled, we need to create random components for the extents
        # NOTE(review): xrange is Python 2 only — this module predates a Python 3 port
        if rng_nm:
            if rec_nm > 0:
                op_vector.extend([rng_nm() for _ in xrange(rec_nm)])
            if lig_nm > 0:
                op_vector.extend([rng_nm() for _ in xrange(lig_nm)])

        new_poses.append(op_vector)

    return new_poses
github brianjimenez / lightdock / lightdock / gso / searchspace / landscape.py View on Github external
def __init__(self, scoring_function, coordinates, receptor, ligand, receptor_id=0, ligand_id=0,
             step_translation=DEFAULT_TRANSLATION_STEP, step_rotation=DEFAULT_ROTATION_STEP,
             step_nmodes=0, num_rec_nmodes=0, num_lig_nmodes=0):
    """Landscape position of a receptor-ligand docking pose.

    The coordinates vector is laid out as
    [tx, ty, tz, qw, qx, qy, qz, rec_nmodes..., lig_nmodes...].
    """
    self.objective_function = scoring_function
    # Pose: translation vector followed by a rotation quaternion
    self.translation = np.array(coordinates[:3])
    self.rotation = Quaternion(coordinates[3], coordinates[4], coordinates[5], coordinates[6])
    self.receptor, self.ligand = receptor, ligand
    self.receptor_id, self.ligand_id = receptor_id, ligand_id
    self.step_translation = step_translation
    self.step_rotation = step_rotation
    self.step_nmodes = step_nmodes
    self.num_rec_nmodes, self.num_lig_nmodes = num_rec_nmodes, num_lig_nmodes
    # ANM extents: copied from the vector when normal modes are enabled, empty otherwise
    if self.num_rec_nmodes > 0:
        self.rec_extent = np.array(coordinates[7:7 + self.num_rec_nmodes])
    else:
        self.rec_extent = np.array([])
    if self.num_lig_nmodes > 0:
        self.lig_extent = np.array(coordinates[-self.num_lig_nmodes:])
    else:
        self.lig_extent = np.array([])
    # Each landscape position must own its own copy of the pose coordinates
    self.receptor_pose = self.receptor.coordinates[self.receptor_id].clone()
    self.ligand_pose = self.ligand.coordinates[self.ligand_id].clone()
    self.ligand_reference_points = self.ligand.reference_points.clone()