How to use the turbinia.evidence.ReportText class in turbinia

To help you get started, we’ve selected a few turbinia examples that show how ReportText is used in public projects.


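All of the examples on this page follow the same basic pattern: construct a ReportText evidence object that points at a report file, populate its text_data, write that text to disk, and attach the evidence to the task result. The sketch below distills that pattern outside of a task; the output directory, file name, and report text are placeholders, and a real worker uses the task's self.output_dir as the snippets below do.

import os

from turbinia.evidence import ReportText

# Illustrative output location; inside a TurbiniaTask this would be
# self.output_dir rather than a hard-coded path.
output_dir = '/tmp/turbinia-output'
os.makedirs(output_dir, exist_ok=True)
output_file_path = os.path.join(output_dir, 'example_analysis.txt')

# The report file is the data source backing the output evidence.
output_evidence = ReportText(source_path=output_file_path)
output_evidence.text_data = 'Example analysis report\n'

# Write the report text to the backing file.
with open(output_file_path, 'wb') as fh:
  fh.write(output_evidence.text_data.encode('utf-8'))

# Inside a TurbiniaTask.run(), the evidence would then be attached to the
# task result, e.g. result.add_evidence(output_evidence, evidence.config).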
github google / turbinia / turbinia / workers / sshd.py
def run(self, evidence, result):
    """Run the sshd_config analysis worker.

    Args:
        evidence (Evidence object):  The evidence we will process.
        result (TurbiniaTaskResult): The object to place task results into.

    Returns:
        TurbiniaTaskResult object.
    """
    # Where to store the resulting output file.
    output_file_name = 'sshd_config_analysis.txt'
    output_file_path = os.path.join(self.output_dir, output_file_name)
    # Set the output file as the data source for the output evidence.
    output_evidence = ReportText(source_path=output_file_path)

    # Read the input file
    with open(evidence.local_path, 'r') as input_file:
      sshd_config = input_file.read()

    (report, priority, summary) = self.analyse_sshd_config(sshd_config)
    output_evidence.text_data = report
    result.report_priority = priority
    result.report_data = report

    # Write the report to the output file.
    with open(output_file_path, 'wb') as fh:
      fh.write(output_evidence.text_data.encode('utf-8'))

    # Add the resulting evidence to the result object.
    result.add_evidence(output_evidence, evidence.config)
    result.close(self, success=True, status=summary)

    return result
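The run() method above unpacks a (report, priority, summary) tuple from analyse_sshd_config(). The stub below is a hypothetical stand-in that only illustrates the shape of that return value; the checks, report formatting, and numeric priorities are assumptions for illustration, not the project's real analyzer logic.

def analyse_sshd_config(config):
  """Hypothetical analyzer stub; returns a (report, priority, summary) tuple."""
  findings = []
  if 'PermitRootLogin yes' in config:
    findings.append('* Root login over SSH is permitted.')
  if 'PasswordAuthentication yes' in config:
    findings.append('* Password authentication is enabled.')

  if findings:
    summary = 'Insecure sshd configuration found'
    report = '#### {0:s}\n{1:s}'.format(summary, '\n'.join(findings))
    # Assumption: a lower number means a more urgent report priority.
    return (report, 20, summary)

  summary = 'No issues found in sshd configuration'
  return (summary, 80, summary)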
github google / turbinia / turbinia / jobs / http_access_logs.py
  def create_tasks(self, evidence):
    """Create tasks.

    Args:
      evidence: List of evidence objects to process

    Returns:
        A list of tasks to schedule.
    """
    tasks = []
    for artifact_name in ACCESS_LOG_ARTIFACTS:
      tasks.extend([
          artifact.FileArtifactExtractionTask(artifact_name) for _ in evidence
      ])
    return tasks


class HTTPAccessLogAnalysisJob(interface.TurbiniaJob):
  """HTTP Access log analysis job."""

  evidence_input = [ExportedFileArtifact]
  evidence_output = [ReportText]

  NAME = 'HTTPAccessLogAnalysisJob'

  def create_tasks(self, evidence):
    """Create task.
    Args:
      evidence: List of evidence objects to process
    Returns:
        A list of tasks to schedule.
    """
    evidence = [e for e in evidence if e.artifact_name in ACCESS_LOG_ARTIFACTS]
    return [wordpress.WordpressAccessLogAnalysisTask() for _ in evidence]


manager.JobsManager.RegisterJobs(
    [HTTPAccessLogExtractionJob, HTTPAccessLogAnalysisJob])
github google / turbinia / turbinia / workers / worker_stat.py
def run(self, evidence, result):
    """Test Stat task.

    Args:
        evidence: Path to data to process.
        result: TurbiniaTaskResult to populate with results.

    Returns:
        TurbiniaTaskResult: object.
    """
    result.log('Running stat on evidence {0:s}'.format(evidence.source_path))
    report_path = os.path.join(self.output_dir, 'report.txt')
    report = ReportText(source_path=report_path)
    report.text_data = str(os.stat(evidence.source_path))
    with open(report_path, 'w') as f:
      f.write(report.text_data)

    result.add_evidence(report, evidence.config)
    result.close(self, success=True)

    return result
github google / turbinia / turbinia / jobs / worker_stat.py
from __future__ import unicode_literals

from turbinia.evidence import Directory
from turbinia.evidence import RawDisk
from turbinia.evidence import ReportText
from turbinia.jobs import interface
from turbinia.jobs import manager
from turbinia.workers.worker_stat import StatTask


class StatJob(interface.TurbiniaJob):
  """Job to run Stat."""

  # The types of evidence that this Job will process
  evidence_input = [RawDisk, Directory]
  evidence_output = [ReportText]

  NAME = 'StatJob'

  def create_tasks(self, evidence):
    """Create task for Stat.

    Args:
      evidence: List of evidence objects to process

    Returns:
        A list of StatTasks.
    """
    return [StatTask() for _ in evidence]


manager.JobsManager.RegisterJob(StatJob)
github google / turbinia / turbinia / jobs / jenkins.py
from turbinia.evidence import Directory
from turbinia.evidence import DockerContainer
from turbinia.evidence import GoogleCloudDisk
from turbinia.evidence import GoogleCloudDiskRawEmbedded
from turbinia.evidence import RawDisk
from turbinia.evidence import ReportText
from turbinia.jobs import interface
from turbinia.jobs import manager
from turbinia.workers.analysis.jenkins import JenkinsAnalysisTask


class JenkinsAnalysisJob(interface.TurbiniaJob):
  """Jenkins analysis job."""

  evidence_input = [
      Directory, DockerContainer, RawDisk, GoogleCloudDisk,
      GoogleCloudDiskRawEmbedded
  ]
  evidence_output = [ReportText]

  NAME = 'JenkinsAnalysisJob'

  def create_tasks(self, evidence):
    """Create task for Jenkins analysis job.

    Args:
      evidence: List of evidence objects to process

    Returns:
        A list of tasks to schedule.
    """
    tasks = [JenkinsAnalysisTask() for _ in evidence]
    return tasks


manager.JobsManager.RegisterJob(JenkinsAnalysisJob)
github google / turbinia / turbinia / jobs / hadoop.py
from turbinia.evidence import DockerContainer
from turbinia.evidence import GoogleCloudDisk
from turbinia.evidence import GoogleCloudDiskRawEmbedded
from turbinia.evidence import RawDisk
from turbinia.evidence import ReportText
from turbinia.jobs import interface
from turbinia.jobs import manager
from turbinia.workers.hadoop import HadoopAnalysisTask


class HadoopAnalysisJob(interface.TurbiniaJob):
  """Analyzes Hadoop AppRoot files."""

  evidence_input = [
      DockerContainer, GoogleCloudDisk, GoogleCloudDiskRawEmbedded, RawDisk
  ]
  evidence_output = [ReportText]

  NAME = 'HadoopAnalysisJob'

  def create_tasks(self, evidence):
    """Create task.

    Args:
      evidence: List of evidence objects to process

    Returns:
        A list of tasks to schedule.
    """
    tasks = [HadoopAnalysisTask() for _ in evidence]
    return tasks


manager.JobsManager.RegisterJob(HadoopAnalysisJob)
github google / turbinia / turbinia / workers / analysis / wordpress.py
def run(self, evidence, result):
    """Run the Wordpress access log analysis worker.

    Args:
       evidence (Evidence object):  The evidence to process
       result (TurbiniaTaskResult): The object to place task results into.

    Returns:
      TurbiniaTaskResult object.
    """
    # Where to store the resulting output file.
    output_file_name = 'wp_access_log_analysis.txt'
    output_file_path = os.path.join(self.output_dir, output_file_name)
    # Set the output file as the data source for the output evidence.
    output_evidence = ReportText(source_path=output_file_path)

    # Change open function if file is GZIP compressed.
    open_function = open
    if evidence.local_path.lower().endswith('gz'):
      open_function = gzip.open

    # Read the input file
    with open_function(evidence.local_path, 'rb') as input_file:
      access_logs_content = input_file.read().decode('utf-8')

    (report, priority,
     summary) = self.analyze_wp_access_logs(access_logs_content)
    output_evidence.text_data = report
    result.report_data = report
    result.report_priority = priority

    # Write the report to the output file.
    with open(output_file_path, 'wb') as fh:
      fh.write(output_evidence.text_data.encode('utf-8'))

    # Add the resulting evidence to the result object.
    result.add_evidence(output_evidence, evidence.config)
    result.close(self, success=True, status=summary)

    return result
github google / turbinia / turbinia / workers / hadoop.py
"""Run Hadoop specific analysis on the evidences.

    Args:
        evidence (Evidence object):  The evidence we will process
        result (TurbiniaTaskResult): The object to place task results into.

    Returns:
        TurbiniaTaskResult object.
    """

    # Where to store the resulting output file.
    output_file_name = 'hadoop_analysis.txt'
    output_file_path = os.path.join(self.output_dir, output_file_name)

    # What type of evidence we should output.
    output_evidence = ReportText(source_path=output_file_path)

    try:
      # We don't use FileArtifactExtractionTask because it exports one
      # evidence object per extracted file.
      output_dir = os.path.join(self.output_dir, 'artifacts')
      collected_artifacts = extract_artifacts(
          artifact_names=['HadoopAppRoot'], disk_path=evidence.device_path,
          output_dir=output_dir)

      (report, priority, summary) = self._AnalyzeHadoopAppRoot(
          collected_artifacts, output_dir)
      if not report:
        raise TurbiniaException(
            'Report generated by _AnalyzeHadoopAppRoot() is empty')

      output_evidence.text_data = '\n'.join(report)
      result.report_data = output_evidence.text_data

      # Write the report to the output file.
      with open(output_file_path, 'wb') as fh:
        fh.write(output_evidence.text_data.encode('utf-8'))

      # Add the resulting evidence to the result object.
      result.add_evidence(output_evidence, evidence.config)
      result.close(self, success=True, status=summary)
    except TurbiniaException as exception:
      result.close(self, success=False, status=str(exception))

    return result
github google / turbinia / turbinia / workers / tomcat.py
def run(self, evidence, result):
    """Run the Tomcat analysis worker.

    Args:
        evidence (Evidence object):  The evidence we will process.
        result (TurbiniaTaskResult): The object to place task results into.

    Returns:
        TurbiniaTaskResult object.
    """

    # Where to store the resulting output file.
    output_file_name = 'tomcat_analysis.txt'
    output_file_path = os.path.join(self.output_dir, output_file_name)
    # Set the output file as the data source for the output evidence.
    output_evidence = ReportText(source_path=output_file_path)

    # Read the input file
    with open(evidence.local_path, 'r') as input_file:
      tomcat_file = input_file.read()

    (report, priority, summary) = self.analyse_tomcat_file(tomcat_file)
    result.report_priority = priority
    result.report_data = report
    output_evidence.text_data = report

    # Write the report to the output file.
    with open(output_file_path, 'wb') as fh:
      fh.write(output_evidence.text_data.encode('utf-8'))

    # Add the resulting evidence to the result object.
    result.add_evidence(output_evidence, evidence.config)
    result.close(self, success=True, status=summary)

    return result
github google / turbinia / turbinia / evidence.py
def __init__(self, text_data=None, *args, **kwargs):
    self.text_data = text_data
    super(ReportText, self).__init__(copyable=True, *args, **kwargs)
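Because text_data can be passed straight to the constructor, a caller that already has the report in memory can create the evidence in one step. A minimal sketch with placeholder values (the path and report string below are made up for illustration):

from turbinia.evidence import ReportText

# Placeholder report body and backing path, for illustration only.
report_body = '#### Example findings\n* Nothing suspicious found.\n'
evidence = ReportText(text_data=report_body, source_path='/tmp/example_report.txt')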