How to use the pytsk3.TSK_FS_META_TYPE_REG constant in pytsk3

To help you get started, we’ve selected a few pytsk3 examples, based on popular ways it is used in public projects.

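TSK_FS_META_TYPE_REG is the value of pytsk3's TSK_FS_META_TYPE_ENUM that marks a regular file; the examples below compare it against entry.info.meta.type while walking a file system to decide whether an entry's contents can be read. A minimal sketch of that pattern, assuming an image file named image.dd whose file system starts at offset 0 (the path is illustrative only):

import pytsk3

# Open the raw image and the file system it contains (assumed to start at offset 0).
img_info = pytsk3.Img_Info('image.dd')
fs_info = pytsk3.FS_Info(img_info)

# Walk the root directory and report every regular file and its size.
for entry in fs_info.open_dir(path='/'):
    meta = entry.info.meta
    if meta is None:
        continue  # deleted or orphaned entries may have no metadata
    if meta.type == pytsk3.TSK_FS_META_TYPE_REG:
        # Regular file: its content can be read with read_random(offset, size).
        print(entry.info.name.name, meta.size)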

github chowdaryd / Usb-Analyzer / usbfor.py
        elif f_type == pytsk3.TSK_FS_META_TYPE_REG and begin.info.meta.size != 0:  # regular file with data
            filedata = begin.read_random(0, begin.info.meta.size)

            print "Extracting File : " + str(['/'.join(parentPath) + begin.info.name.name])

            # create the output folder if it does not exist yet
            if not os.path.exists(outputPath):
                os.makedirs(outputPath)

            # extract the file; binary mode keeps the recovered data unmodified
            extractFile = open(outputPath + begin.info.name.name, 'wb')
            extractFile.write(filedata)
            extractFile.close()

        # regular file, but its size is 0, so there is nothing to extract
        elif f_type == pytsk3.TSK_FS_META_TYPE_REG and begin.info.meta.size == 0:
            print "Unable to recover : " + str(['/'.join(parentPath) + begin.info.name.name])

    except IOError as e:
        print e
        continue
    except KeyboardInterrupt:
        sys.exit(1)
github dlcowen / dfirwizard / dfirwizard-v12.py
            if args.extract:
                # create the destination file only when extraction was requested
                extractFile = open(outputPath+entryObject.info.name.name,'wb')
            while offset < entryObject.info.meta.size:
                available_to_read = min(BUFF_SIZE, entryObject.info.meta.size - offset)
                filedata = entryObject.read_random(offset,available_to_read)
                md5hash.update(filedata)
                sha1hash.update(filedata)
                offset += len(filedata)
                if args.extract:
                  extractFile.write(filedata)

            if args.extract:
                extractFile.close()
            wr.writerow([int(entryObject.info.meta.addr),'/'.join(parentPath)+entryObject.info.name.name,datetime.datetime.fromtimestamp(entryObject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S'),int(entryObject.info.meta.size),md5hash.hexdigest(),sha1hash.hexdigest()])

        elif f_type == pytsk3.TSK_FS_META_TYPE_REG and entryObject.info.meta.size == 0:
            # zero-byte file: record the well-known empty-file MD5 and SHA-1 digests
            wr.writerow([int(entryObject.info.meta.addr),'/'.join(parentPath)+entryObject.info.name.name,datetime.datetime.fromtimestamp(entryObject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S'),int(entryObject.info.meta.size),"d41d8cd98f00b204e9800998ecf8427e","da39a3ee5e6b4b0d3255bfef95601890afd80709"])

      except IOError as e:
        print e
        continue
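The fragment above hashes a regular file in BUFF_SIZE chunks with read_random() instead of reading it in one call, which keeps memory use bounded for large files. A minimal self-contained sketch of that chunked pattern; the buffer size and helper name are chosen here for illustration:

import hashlib

BUFF_SIZE = 1024 * 1024  # assumed 1 MiB read size, mirroring the snippet's BUFF_SIZE


def hash_tsk_file(entry_object):
    """Hashes a regular pytsk3 file entry in fixed-size chunks."""
    md5hash = hashlib.md5()
    sha1hash = hashlib.sha1()
    offset = 0
    size = entry_object.info.meta.size
    while offset < size:
        available_to_read = min(BUFF_SIZE, size - offset)
        filedata = entry_object.read_random(offset, available_to_read)
        if not filedata:
            break  # stop on a short read rather than loop forever
        md5hash.update(filedata)
        sha1hash.update(filedata)
        offset += len(filedata)
    return md5hash.hexdigest(), sha1hash.hexdigest()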
github log2timeline / plaso / plaso / collector / tsk_collector.py
"""Return a list of files given a path and a pattern."""
    ret = []
    file_re = re.compile(r'^{0:s}$'.format(file_name), re.I | re.S)
    try:
      directory = self._fs_obj.fs.open_dir(path)
    except IOError as e:
      raise errors.PreProcessFail(
          u'Unable to open directory: {0:s} with error: {1:s}'.format(path, e))

    for tsk_file in directory:
      try:
        f_type = tsk_file.info.meta.type
        name = tsk_file.info.name.name
      except AttributeError:
        continue
      if f_type == pytsk3.TSK_FS_META_TYPE_REG:
        m = file_re.match(name)
        if m:
          ret.append(u'{0:s}/{1:s}'.format(path, name))

    return ret
github log2timeline / plaso / plaso / pvfs / pfile_entry.py
def IsFile(self):
    """Determines if the file entry is a file."""
    tsk_fs_meta_type = getattr(
        self.file_object.fileobj.info.meta, 'type',
        pytsk3.TSK_FS_META_TYPE_UNDEF)
    return tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_REG
github log2timeline / plaso / plaso / pvfs / pfile_io.py
def readline(self, size=None):
    """Read a line from the file.

    Args:
      size: Optional maximum number of bytes to read (including the trailing
          newline); if set, the function may return an incomplete line.

    Returns:
      A string containing a single line read from the file.
    """
    if self.fileobj.info.meta.type != pytsk3.TSK_FS_META_TYPE_REG:
      raise IOError('Cannot read from directory.')

    read_size = size or self.MIN_READSIZE

    # check if we need to read more into the buffer
    if '\n' not in self.readahead and read_size >= len(self.readahead):
      self.readahead = self.read(read_size)

    result, sep, self.readahead = self.readahead.partition('\n')

    return result + sep
github google / grr / grr / client / grr_response_client / vfs_handlers / sleuthkit.py
      pytsk3.TSK_FS_NAME_TYPE_FIFO: stat.S_IFIFO,
      pytsk3.TSK_FS_NAME_TYPE_CHR: stat.S_IFCHR,
      pytsk3.TSK_FS_NAME_TYPE_DIR: stat.S_IFDIR,
      pytsk3.TSK_FS_NAME_TYPE_BLK: stat.S_IFBLK,
      pytsk3.TSK_FS_NAME_TYPE_REG: stat.S_IFREG,
      pytsk3.TSK_FS_NAME_TYPE_LNK: stat.S_IFLNK,
      pytsk3.TSK_FS_NAME_TYPE_SOCK: stat.S_IFSOCK,
  }

  META_TYPE_LOOKUP = {
      pytsk3.TSK_FS_META_TYPE_BLK: 0,
      pytsk3.TSK_FS_META_TYPE_CHR: stat.S_IFCHR,
      pytsk3.TSK_FS_META_TYPE_DIR: stat.S_IFDIR,
      pytsk3.TSK_FS_META_TYPE_FIFO: stat.S_IFIFO,
      pytsk3.TSK_FS_META_TYPE_LNK: stat.S_IFLNK,
      pytsk3.TSK_FS_META_TYPE_REG: stat.S_IFREG,
      pytsk3.TSK_FS_META_TYPE_SOCK: stat.S_IFSOCK,
  }

  # Files we won't return in directories.
  BLACKLIST_FILES = [
      "$OrphanFiles"  # Special TSK dir that invokes processing.
  ]

  # The file like object we read our image from
  tsk_raw_device = None

  # NTFS files carry an attribute identified by ntfs_type and ntfs_id.
  tsk_attribute = None

  # These are all the bits that define the type of the file in the stat mode.
  # Equal to 0b1111000000000000.
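The lookup tables above translate pytsk3 name and meta type values into the corresponding stat module constants. A minimal sketch of how such a mapping might be applied to build a stat-style mode for an entry; the helper name, the reduced table, and the default permission bits are assumptions for illustration, not part of the GRR code:

import stat

import pytsk3

# Reduced illustrative mapping in the spirit of META_TYPE_LOOKUP above.
META_TYPE_TO_MODE = {
    pytsk3.TSK_FS_META_TYPE_DIR: stat.S_IFDIR,
    pytsk3.TSK_FS_META_TYPE_REG: stat.S_IFREG,
    pytsk3.TSK_FS_META_TYPE_LNK: stat.S_IFLNK,
}


def tsk_meta_to_mode(tsk_file, permissions=0o644):
    """Builds a stat-style st_mode value from a pytsk3 file object."""
    meta_type = getattr(tsk_file.info.meta, 'type', pytsk3.TSK_FS_META_TYPE_UNDEF)
    return META_TYPE_TO_MODE.get(meta_type, 0) | permissions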
github log2timeline / dfvfs / dfvfs / vfs / tsk_file_entry.py
      raise errors.BackEndError(
          'Missing TSK File .info, .info.meta or .info.fs_info')

    super(TSKFileEntry, self).__init__(
        resolver_context, file_system, path_spec, is_root=is_root,
        is_virtual=is_virtual)
    self._file_system_type = tsk_file.info.fs_info.ftype
    self._name = None
    self._parent_inode = parent_inode
    self._tsk_file = tsk_file

    # The type is an instance of pytsk3.TSK_FS_META_TYPE_ENUM.
    tsk_fs_meta_type = getattr(
        tsk_file.info.meta, 'type', pytsk3.TSK_FS_META_TYPE_UNDEF)

    if tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_REG:
      self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
    elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_DIR:
      self.entry_type = definitions.FILE_ENTRY_TYPE_DIRECTORY
    elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_LNK:
      self.entry_type = definitions.FILE_ENTRY_TYPE_LINK
    elif tsk_fs_meta_type in (
        pytsk3.TSK_FS_META_TYPE_CHR, pytsk3.TSK_FS_META_TYPE_BLK):
      self.entry_type = definitions.FILE_ENTRY_TYPE_DEVICE
    elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_FIFO:
      self.entry_type = definitions.FILE_ENTRY_TYPE_PIPE
    elif tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_SOCK:
      self.entry_type = definitions.FILE_ENTRY_TYPE_SOCKET
github dlcowen / dfirwizard / dfirwizard-v8.py
print "Cannot retrieve type of",entryObject.info.name.name
          continue
        
      try:

        filepath = '/%s/%s' % ('/'.join(parentPath),entryObject.info.name.name)

        if f_type == pytsk3.TSK_FS_META_TYPE_DIR:
            sub_directory = entryObject.as_directory()
            parentPath.append(entryObject.info.name.name)
            directoryRecurse(sub_directory,parentPath)
            parentPath.pop(-1)
            print "Directory: %s" % filepath
            

        elif f_type == pytsk3.TSK_FS_META_TYPE_REG and entryObject.info.meta.size != 0:
        
            filedata = entryObject.read_random(0,entryObject.info.meta.size)
            md5hash = hashlib.md5()
            md5hash.update(filedata)
            sha1hash = hashlib.sha1()
            sha1hash.update(filedata)
            wr.writerow([int(entryObject.info.meta.addr),'/'.join(parentPath)+entryObject.info.name.name,datetime.datetime.fromtimestamp(entryObject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S'),int(entryObject.info.meta.size),md5hash.hexdigest(),sha1hash.hexdigest()])

        elif f_type == pytsk3.TSK_FS_META_TYPE_REG and entryObject.info.meta.size == 0:

            wr.writerow([int(entryObject.info.meta.addr),'/'.join(parentPath)+entryObject.info.name.name,datetime.datetime.fromtimestamp(entryObject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S'),int(entryObject.info.meta.size),"d41d8cd98f00b204e9800998ecf8427e","da39a3ee5e6b4b0d3255bfef95601890afd80709"])
          
      except IOError as e:
        print e
        continue
github log2timeline / plaso / plaso / lib / sleuthkit.py
    # Ignore a leading path separator: \ on Windows.
    # This prevents the cannot access \$MFT issue.
    # TODO: this is a workaround for now and needs to be fixed in pyvfs.
    elif platform.system() == 'Windows' and path.startswith('\\'):
      self.fileobj = self.fs.open(path[1:])
    else:
      self.fileobj = self.fs.open(path)

    # Validate the metadata structure before touching any of its attributes.
    if not self.fileobj.info.meta:
      raise IOError('No valid metastructure for inode: %d' % inode)

    self.size = self.fileobj.info.meta.size
    self.name = path
    self.ctime = self.fileobj.info.meta.ctime

    if self.fileobj.info.meta.type != pytsk3.TSK_FS_META_TYPE_REG:
      raise IOError('Cannot open a directory.')

    self.readahead = ''
    self.next_read_offset = 0
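The checks above make sure the opened entry has valid metadata and is a TSK_FS_META_TYPE_REG entry before any read. A standalone sketch of the same guard clauses, assuming an already opened pytsk3.FS_Info object; the helper name and error messages are illustrative:

import pytsk3


def read_regular_file(fs_info, path):
    """Reads the full content of a regular file from a TSK file system."""
    tsk_file = fs_info.open(path)
    meta = tsk_file.info.meta
    if meta is None:
        raise IOError('No valid metadata structure for: %s' % path)
    if meta.type != pytsk3.TSK_FS_META_TYPE_REG:
        raise IOError('Not a regular file: %s' % path)
    if meta.size == 0:
        return b''  # nothing to read for a zero-byte file
    return tsk_file.read_random(0, meta.size)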
github dlcowen / dfirwizard / dfirwizard-v11.py
          continue
        
      try:

        filepath = '/%s/%s' % ('/'.join(parentPath),entryObject.info.name.name)
        outputPath ='./%s/%s/' % (str(partition.addr),'/'.join(parentPath))

        if f_type == pytsk3.TSK_FS_META_TYPE_DIR:
            sub_directory = entryObject.as_directory()
            parentPath.append(entryObject.info.name.name)
            directoryRecurse(sub_directory,parentPath)
            parentPath.pop(-1)
            #print "Directory: %s" % filepath
            

        elif f_type == pytsk3.TSK_FS_META_TYPE_REG and entryObject.info.meta.size != 0:
            searchResult = re.match(args.search,entryObject.info.name.name)
            if not searchResult:
              continue
            filedata = entryObject.read_random(0,entryObject.info.meta.size)
            #print "match ",entryObject.info.name.name
            md5hash = hashlib.md5()
            md5hash.update(filedata)
            sha1hash = hashlib.sha1()
            sha1hash.update(filedata)
            wr.writerow([int(entryObject.info.meta.addr),'/'.join(parentPath)+entryObject.info.name.name,datetime.datetime.fromtimestamp(entryObject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S'),int(entryObject.info.meta.size),md5hash.hexdigest(),sha1hash.hexdigest()])
            if args.extract:
              if not os.path.exists(outputPath):
                os.makedirs(outputPath)
              # write the recovered data in binary mode so it is not altered
              extractFile = open(outputPath+entryObject.info.name.name,'wb')
              extractFile.write(filedata)
              extractFile.close()