How to use the pyautogui.screenshot function in PyAutoGUI

To help you get started, we’ve selected a few pyautogui.screenshot examples based on popular ways the function is used in public projects.
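At its core, pyautogui.screenshot() captures the full screen, or an optional region, and returns a Pillow Image object. A minimal sketch of the common call patterns (the file name below is just an illustration):

import pyautogui

# Full-screen capture; returns a PIL.Image.Image
full = pyautogui.screenshot()

# Capture only a region, given as (left, top, width, height) in pixels
corner = pyautogui.screenshot(region=(0, 0, 300, 400))

# Passing a file name also saves the capture to disk
pyautogui.screenshot('example_capture.png')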


Example from sneakypete81/updatescanner: test/functional/regions/element.py (view on GitHub)
def _location_matches_expected(self):
    if self.expected_region is None:
        return False

    # Capture only the region where the element is expected to appear
    screenshot = pyautogui.screenshot(region=self.expected_region)
    self._last_screenshot = screenshot

    # locate() returns None when the reference image is not found in the capture
    result = pyautogui.locate(str(self.image_path), screenshot)
    return result is not None
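If you don't need to keep the intermediate screenshot, PyAutoGUI can fold the capture-and-search into one call. A minimal sketch using pyautogui.locateOnScreen; the image file name is illustrative, and note that depending on the installed version a miss either returns None or raises pyautogui.ImageNotFoundException:

import pyautogui

try:
    box = pyautogui.locateOnScreen('expected_element.png')
except pyautogui.ImageNotFoundException:
    box = None

if box is not None:
    # box is a Box(left, top, width, height); center() converts it to a Point
    print('Found at', pyautogui.center(box))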
Example from gil9red/2048_bot: main.py (view on GitHub)
import time

import cv2
import numpy as np
import pyautogui

import utils

log = utils.log


# TODO: check these options:
# # Speed-up using multithreads
# cv2.setUseOptimized(True)
# cv2.setNumThreads(4)

while True:
    t = time.perf_counter()  # time.clock() was removed in Python 3.8

    try:
        log.debug('Start')

        pil_image = pyautogui.screenshot()
        log.debug('Get screenshot: %s', pil_image)

        opencv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

        # Get the list of buttons and map each one to its center point
        buttons = [pyautogui.center(rect) for rect in utils.get_button_coords_list(opencv_image)]
        number_buttons = len(buttons)

        if number_buttons > 2:
            log.warning('Found more buttons than expected: %s', number_buttons)

            utils.make_screenshot(prefix='strange__')
            quit()

        # Two buttons, "Continue" and "Play again!", appear once the 2048 tile is reached
        elif number_buttons == 2:
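The key move in this loop is the Pillow-to-OpenCV handoff: pyautogui.screenshot() returns pixels in RGB order, while OpenCV expects BGR. Stripped of the project-specific helpers, that conversion is just:

import cv2
import numpy as np
import pyautogui

pil_image = pyautogui.screenshot()
# Pillow stores pixels as RGB; OpenCV expects BGR, so swap the channels
opencv_image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
print(opencv_image.shape)  # (height, width, 3)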
Example from mozilla/iris_firefox: iris/api/core.py (view on GitHub)
def _region_grabber(region=None, for_ocr=False):
    """Grabs an image from a region or from the full screen.

    :param Region || None region: Region to capture, or None for the full screen.
    :param bool for_ocr: If True, skip the downscale so OCR runs on the full-resolution capture.
    :return: Image
    """
    is_uhd, uhd_factor = get_uhd_details()

    if isinstance(region, Region):
        # On UHD displays the region is in logical coordinates, so scale it up
        r_x = uhd_factor * region.getX() if is_uhd else region.getX()
        r_y = uhd_factor * region.getY() if is_uhd else region.getY()
        r_w = uhd_factor * region.getW() if is_uhd else region.getW()
        r_h = uhd_factor * region.getH() if is_uhd else region.getH()
        grabbed_area = pyautogui.screenshot(region=(r_x, r_y, r_w, r_h))

        if is_uhd and not for_ocr:
            grabbed_area = grabbed_area.resize([region.getW(), region.getH()])
        return grabbed_area

    else:
        grabbed_area = pyautogui.screenshot(region=(0, 0, SCREENSHOT_WIDTH, SCREENSHOT_HEIGHT))

        if is_uhd and not for_ocr:
            return grabbed_area.resize([SCREEN_WIDTH, SCREEN_HEIGHT])
        else:
            return grabbed_area
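The same capture-at-physical-resolution, downscale-to-logical pattern works without Iris's Region class. A hedged sketch assuming a fixed 2x HiDPI factor (real code should detect the factor per platform):

import pyautogui

SCALE = 2  # assumed HiDPI scale factor; query the OS for the real value

logical = (0, 0, 400, 300)  # left, top, width, height in logical pixels
physical = tuple(v * SCALE for v in logical)

grabbed = pyautogui.screenshot(region=physical)
# Shrink back so the result lines up with logical screen coordinates
grabbed = grabbed.resize((logical[2], logical[3]))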
Example from drov0/python-imagesearch: imagesearch.py (view on GitHub)
def imagesearch(image, precision=0.8):
    """Searches the whole screen for `image`; returns the match's top-left corner, or [-1, -1]."""
    im = pyautogui.screenshot()
    if is_retina:
        # Retina captures are double-resolution; halve them to match screen coordinates
        im.thumbnail((round(im.size[0] * 0.5), round(im.size[1] * 0.5)))
    # im.save('testarea.png')  # useful for debugging: saves the capture to disk
    img_rgb = np.array(im)
    # The screenshot's pixel order is RGB, not OpenCV's usual BGR
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
    template = cv2.imread(image, 0)

    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc
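A usage sketch for the function above; the needle file name is illustrative, and is_retina is a module-level flag set elsewhere in the project:

import pyautogui

pos = imagesearch('button.png', precision=0.9)
if pos[0] != -1:
    # pos is the top-left corner of the best match, in screen coordinates
    pyautogui.click(pos[0], pos[1])
else:
    print('Image not found on screen')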
Example from darkmatter2222/COD-MW-2019-DNN: EnemyDetector/Scripts/Overlay.py (view on GitHub)
def task():
    last100Image = None
    lastSecondTimestamp = datetime.utcnow() + timedelta(seconds=1)
    tickCounter = 0
    while True:
        tickCounter += 1

        if datetime.utcnow() > lastSecondTimestamp:
            lastSecondTimestamp = datetime.utcnow() + timedelta(seconds=1)
            print('Ticks Per Second %d' % tickCounter)
            tickCounter = 0


        # Grab Screen
        image = pyautogui.screenshot(region=(centerPoint[0] - 100, centerPoint[1] - 100, 200, 200))

        if last100Image is None:
            last100Image = image

        # Predict
        prediction = model.predict(np.asarray([np.asarray(image)]) / 255)
        # Print Result
        v.set("Enemy Confidence " + str(round(prediction[0][0] * 100)).zfill(3) + '%')
        predictionPercent = round(prediction[0][0] * 100, 2)

        # Shade the overlay from green (low confidence) toward red (high confidence)
        r = 0
        g = 255
        b = 0
        if predictionPercent > 1:
            r = int((255*predictionPercent)/100)
            g = int(255-((255*predictionPercent)/100))
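centerPoint is defined outside this excerpt. Assuming it means the middle of the primary screen, a plausible definition uses pyautogui.size():

import pyautogui

screen_width, screen_height = pyautogui.size()
centerPoint = (screen_width // 2, screen_height // 2)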
Example from gil9red/SimplePyScripts: FOO_TEST_TEST/2048_bot__opencv__pyautogui/utils.py (view on GitHub)
def make_screenshot(prefix=''):
    pil_image = pyautogui.screenshot()

    from datetime import datetime
    file_name = datetime.now().strftime(prefix + '%d%m%y %H%M%S.jpg')
    log.info('Saving screenshot to ' + file_name)

    pil_image.save(file_name)

    return pil_image
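Usage mirrors the 2048 bot example earlier on this page, which calls the helper when it detects an unexpected button count:

# Saves a timestamped full-screen capture such as 'strange__010125 120000.jpg'
make_screenshot(prefix='strange__')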
Example from mozilla/iris_firefox: tools/project_check.py (view on GitHub)
print("Version: " + cv2.__version__)
    print("")
    print(bcolors.BOLD + bcolors.OKGREEN + "OpenCV working correctly." + bcolors.ENDC)


print(
    "_____________________________________________________________________________________"
)
print("")
print(bcolors.OKBLUE + "Testing PyAutoGui" + bcolors.ENDC)
print("")

print(bcolors.OKBLUE + "Testing PyAutoGui screenshot" + bcolors.ENDC)
try:
    # A successful call returns a PIL Image; printing it shows its size and mode
    print(pyautogui.screenshot())
except (IOError, OSError) as e:
    print(bcolors.FAIL + "ERROR" + bcolors.ENDC)
    print(e)
except:
    print("Unexpected error:", sys.exc_info()[0])
    raise

print(bcolors.OKBLUE + "Testing PyAutoGui keyboard" + bcolors.ENDC)
try:
    # typewrite returns None; a successful no-op call just proves keyboard access works
    print(pyautogui.typewrite(""))
except (IOError, OSError) as e:
    print(bcolors.FAIL + "ERROR" + bcolors.ENDC)
    print(e)
except:
    print("Unexpected error:", sys.exc_info()[0])
Example from darkmatter2222/COD-MW-2019-DNN: EnemyDetector/Scripts/NetworkDataCollection.py (view on GitHub)
# Load Model
model = keras.models.load_model('..\\Models\\CODV7.h5')

# Starting Main Loop (will run faster if using Tensorflow + GPU)
print('Started Inner Loop')
while True:
    tickCounter += 1

    if datetime.utcnow() > lastSecondTimestamp:
        lastSecondTimestamp = datetime.utcnow() + timedelta(seconds=1)
        print('Ticks Per Second %d' % tickCounter)
        tickCounter = 0

    # Grab Screen
    image = pyautogui.screenshot(region=(centerPoint[0] - (100 * fourKMultiplier),
                                         centerPoint[1] - (100 * fourKMultiplier),
                                         (200 * fourKMultiplier), (200 * fourKMultiplier)))
    if image.size != (200, 200):
        image = image.resize((200, 200), 0)  # resample=0 is Image.NEAREST

    # Format and Normalize Data
    normalizedImage = np.asarray([np.asarray(image)]) / 255
    # Predict
    prediction = model.predict(normalizedImage)

    targetFolder = None

    if prediction[0][0] == 1:
        targetFolder = '\\100\\'
    elif 0.90 < prediction[0][0] < 1.00:  # the model outputs a probability in [0, 1]
        targetFolder = '\\90-99\\'
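fourKMultiplier is defined outside this excerpt; it evidently scales the 200x200 capture region up on 4K displays so the resize back to 200x200 keeps detail. One hedged way to derive it (the 3840 threshold is an assumption):

import pyautogui

screen_width, _ = pyautogui.size()
# Assumption: treat screens at least 3840 pixels wide (4K) as 2x, everything else as 1x
fourKMultiplier = 2 if screen_width >= 3840 else 1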