How to use the mechanize.Request function in mechanize

To help you get started, we’ve selected a few mechanize examples based on popular ways it is used in public projects.
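
As a quick orientation, here is a minimal sketch of building a mechanize.Request, adding a header, and opening it; the URL is a placeholder and network access is assumed.

import mechanize

# Build a request for a placeholder URL and attach a custom header.
req = mechanize.Request("http://example.com/")
req.add_header("User-Agent", "my-script/0.1")

# Open it directly with the module-level opener...
response = mechanize.urlopen(req)
print(response.geturl())
response.close()

# ...or hand an equivalent request to a Browser, which adds history and
# cookie handling on top.
br = mechanize.Browser()
br.set_handle_robots(False)  # placeholder site; skip robots.txt for this sketch
response = br.open(mechanize.Request("http://example.com/"))
print(response.geturl())
response.close()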


github python-mechanize / mechanize / test / test_browser.py
        def test_state(br):
            self.assertIsNone(br.request)
            self.assertIsNone(br.response())
            self.assertRaises(mechanize.BrowserStateError, br.back)

        br = make_browser_with_redirect()
        test_state(br)
        req = mechanize.Request("http://example.com")
        req.visit = False
        br.open(req)
        test_state(br)

        br = make_browser_with_redirect()
        test_state(br)

        req = mechanize.Request("http://example.com")
        self.assertIsNone(req.visit)
        br.open_novisit(req)
        test_state(br)
        self.assertFalse(req.visit)

        def test_one_visit(handlers):
            br = TestBrowser2()
            for handler in handlers:
                br.add_handler(handler)
            req = mechanize.Request("http://example.com")
            req.visit = True
            br.open(req)
            return br

        def test_state(br):
            # XXX the _history._history check is needed because of the weird
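
The excerpt above exercises the Request.visit flag, which controls whether a Browser records the response in its history. A minimal sketch of the same idea (placeholder URL, network access assumed):

import mechanize

br = mechanize.Browser()
br.set_handle_robots(False)

# With visit set to False the browser does not record the request:
# its history stays empty and br.response() is left unchanged.
req = mechanize.Request("http://example.com")
req.visit = False
br.open(req)

# open_novisit() achieves the same effect without touching the flag.
br.open_novisit(mechanize.Request("http://example.com"))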
github python-mechanize / mechanize / test / test_urllib2.py
        import socket
        data = "rheum rhaponicum"
        h = NullFTPHandler(data)
        h.parent = MockOpener()

        for url, host, port, type_, dirs, timeout, filename, mimetype in [
            ("ftp://localhost/foo/bar/baz.html", "localhost", ftplib.FTP_PORT,
             "I", ["foo", "bar"], _sockettimeout._GLOBAL_DEFAULT_TIMEOUT,
             "baz.html", "text/html"),
            ("ftp://localhost:80/foo/bar/", "localhost", 80, "D",
             ["foo", "bar"], _sockettimeout._GLOBAL_DEFAULT_TIMEOUT, "", None),
            ("ftp://localhost/baz.gif;type=a", "localhost", ftplib.FTP_PORT,
             "A", [], _sockettimeout._GLOBAL_DEFAULT_TIMEOUT, "baz.gif",
             None),  # TODO: really this should guess image/gif
        ]:
            req = Request(url, timeout=timeout)
            r = h.ftp_open(req)
            # ftp authentication not yet implemented by FTPHandler
            self.assertTrue(h.user == h.passwd == "")
            self.assertEqual(h.host, socket.gethostbyname(host))
            self.assertEqual(h.port, port)
            self.assertEqual(h.dirs, dirs)
            if sys.version_info >= (2, 6):
                self.assertEqual(h.timeout, timeout)
            self.assertEqual(h.ftpwrapper.filename, filename)
            self.assertEqual(h.ftpwrapper.filetype, type_)
            headers = r.info()
            self.assertEqual(headers.get("Content-type"), mimetype)
            self.assertEqual(int(headers["Content-length"]), len(data))
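
The loop above also shows that mechanize.Request accepts a per-request timeout; when it is omitted, the global default socket timeout applies. A minimal sketch (placeholder FTP URL, 30-second timeout chosen arbitrarily):

import mechanize

# Per-request timeout in seconds for the underlying socket operations.
req = mechanize.Request("ftp://ftp.example.com/pub/file.txt", timeout=30.0)
response = mechanize.urlopen(req)
data = response.read()
response.close()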
github python-mechanize / mechanize / test / test_urllib2.py
        rfpc.clear()
        rfpc._can_fetch = False
        url = "http://example.com:80/rhubarb.html"
        req = Request(url)
        try:
            h.http_request(req)
        except mechanize.HTTPError as e:
            self.assertTrue(e.request == req)
            self.assertTrue(e.code == 403)
        # new host: reload robots.txt (even though the host and port are
        #  unchanged, we treat this as a new host because
        #  "example.com" != "example.com:80")
        rfpc.clear()
        rfpc._can_fetch = True
        url = "http://example.com/rhubarb.html"
        req = Request(url)
        h.http_request(req)
        self.assertEqual(rfpc.calls, [
            "__call__",
            ("set_opener", opener),
            ("set_url", "http://example.com/robots.txt"),
            ("set_timeout", _sockettimeout._GLOBAL_DEFAULT_TIMEOUT),
            "read",
            ("can_fetch", "", url),
        ])
        # https url -> should fetch robots.txt from https url too
        rfpc.clear()
        url = "https://example.org/rhubarb.html"
        req = Request(url)
        h.http_request(req)
        self.assertEqual(rfpc.calls, [
            "__call__",
github python-mechanize / mechanize / test / test_urllib2.py
        # XXXXX two handlers case: ordering
        o = OpenerDirector()
        meth_spec = [[
            ("http_request", "return request"),
            ("http_response", "return response"),
            ("ftp_request", "return request"),
            ("ftp_response", "return response"),
            ("any_request", "return request"),
            ("any_response", "return response"),
        ]]
        handlers = add_ordered_mock_handlers(o, meth_spec)
        handler = handlers[0]

        for scheme in ["http", "ftp"]:
            o.calls = []
            req = Request("%s://example.com/" % scheme)
            o.open(req)

            calls = [
                (handler, "any_request"),
                (handler, ("%s_request" % scheme)),
                (handler, "any_response"),
                (handler, ("%s_response" % scheme)),
            ]
            self.assertEqual(len(o.calls), len(calls))
            for i, ((handler, name, args, kwds), calls) in (
                    enumerate(zip(o.calls, calls))):
                if i < 2:
                    # *_request
                    self.assertTrue((handler, name) == calls)
                    self.assertTrue(len(args) == 1)
                    self.assertTrue(isinstance(args[0], Request))
github python-mechanize / mechanize / test / test_cookies.py
        res = FakeResponse(headers, "http://acme.com/")
        c.extract_cookies(res, req)
        assert len(c) == 0

        req = Request("http://www.acme.com/")
        res = FakeResponse(headers, "http://www.acme.com/")
        c.extract_cookies(res, req)
        assert len(c) == 1

        req = Request("http://www.coyote.com/")
        res = FakeResponse(headers, "http://www.coyote.com/")
        c.extract_cookies(res, req)
        assert len(c) == 1

        # set a cookie with non-allowed domain...
        req = Request("http://www.coyote.com/")
        res = FakeResponse(headers, "http://www.coyote.com/")
        cookies = c.make_cookies(res, req)
        c.set_cookie(cookies[0])
        assert len(c) == 2
        # ... and check it doesn't get returned
        c.add_cookie_header(req)
        assert not req.has_header("Cookie")
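
The cookie test above builds Request objects so that extract_cookies() can decide which cookies to store and add_cookie_header() can decide which ones to send back. A minimal sketch of the sending half with an empty CookieJar (placeholder URL, no network traffic involved):

import mechanize

cj = mechanize.CookieJar()

# The jar inspects the request's URL to pick matching cookies; with an
# empty jar no Cookie header is added.
req = mechanize.Request("http://www.example.com/")
cj.add_cookie_header(req)
print(req.has_header("Cookie"))  # False while the jar is empty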
github python-mechanize / mechanize / test / test_urllib2.py
                try:
                    self.assertEqual(o.req.get_method(), "GET")
                except AttributeError:
                    self.assertFalse(o.req.has_data())

                # now it's a GET, there should not be headers regarding content
                # (possibly dragged from before being a POST)
                headers = [x.lower() for x in o.req.headers]
                self.assertTrue("content-length" not in headers)
                self.assertTrue("content-type" not in headers)

                self.assertEqual(o.req.headers["Nonsense"], "viking=withhold")
                self.assertTrue("Spam" not in o.req.headers)
                self.assertTrue("Spam" not in o.req.unredirected_hdrs)

        # loop detection
        req = Request(from_url)

        def redirect(h, req, url=to_url):
            h.http_error_302(req,
                             MockFile(), 302, "Blah",
                             http_message({
                                 "location": url
                             }))

        # Note that the *original* request shares the same record of
        # redirections with the sub-requests caused by the redirections.

        # detect infinite loop redirect of a URL to itself
        req = Request(from_url, origin_req_host="example.com")
        count = 0
        try:
            while 1:
github Nandaka / PixivUtil2 / PixivBrowserFactory.py
    def updateFanboxCookie(self):
        p_req = mechanize.Request("https://www.pixiv.net/fanbox")
        p_req.add_header('Accept', 'application/json, text/plain, */*')
        p_req.add_header('Origin', 'https://www.pixiv.net')
        p_req.add_header('User-Agent', self._config.useragent)

        try:
            p_res = self.open_with_retry(p_req)
            parsed = BeautifulSoup(p_res, features="html5lib").decode('utf-8')
            p_res.close()
        except BaseException:
            PixivHelper.get_logger().error('Error at updateFanboxCookie(): %s', sys.exc_info())
            return False

        result = False
        if '"user":{"isLoggedIn":true' in str(parsed):
            result = True
            self._is_logged_in_to_FANBOX = True
github kiawin / undi-info / undi-info-results-parliment.py
#!/usr/bin/env python

import mechanize
import lxml.html

url = 'http://undi.info'

req = mechanize.Request(url)
resp = mechanize.urlopen(req)
html = lxml.html.parse(resp).getroot()

#State Listings
links = html.cssselect("div.negeri_nav ul li a")

state_names = []
state_slugs = []
state_plates = []

for link in links:
	state_names.append(link.text.lower())
	state_slugs.append(link.attrib.get('slug'))
	state_plates.append(link.attrib.get('plate'))

#Parliment Constituency Listings
github Nandaka / PixivUtil2 / PixivBrowserFactory.py
    def fanboxLoginUsingCookie(self, login_cookie=None):
        """  Log in to Pixiv using saved cookie, return True if success """
        result = False
        parsed = ""
        if login_cookie is None or len(login_cookie) == 0:
            login_cookie = self._config.cookieFanbox

        if len(login_cookie) > 0:
            PixivHelper.print_and_log('info', 'Trying to log in FANBOX with saved cookie')
            # self.clearCookie()
            self._loadCookie(login_cookie, "fanbox.cc")

            req = mechanize.Request("https://www.fanbox.cc")
            req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
            req.add_header('Origin', 'https://www.fanbox.cc')
            req.add_header('User-Agent', self._config.useragent)
            try:
                res = self.open_with_retry(req)
                parsed = BeautifulSoup(res, features="html5lib").decode('utf-8')
                PixivHelper.get_logger().info('Logging in with cookie to Fanbox, return url: %s', res.geturl())
                res.close()
            except BaseException:
                PixivHelper.get_logger().error('Error at fanboxLoginUsingCookie(): %s', sys.exc_info())
                self.cookiejar.clear("fanbox.cc")

            if '"user":{"isLoggedIn":true' in str(parsed):
                result = True
                self._is_logged_in_to_FANBOX = True
            del parsed
github kiawin / undi-info / undi-info-results-parliment.py
#Parliment Constituency Listings
parliment_places = []
parliment_codes = []

for state in state_slugs:
	path = "div#parl_listing div.content ul li.negeri_p_"+state+"_li a"
	links = html.cssselect(path)
	for link in links:
		parliment_places.append(link.cssselect("span.place")[0].text)
		parliment_codes.append(link.cssselect("span.code")[0].text)

url = 'http://undi.info/ajax.php?a=info&c='

for parliment_code in parliment_codes:
	url_ajax = url+parliment_code
	req = mechanize.Request(url_ajax)
	resp = mechanize.urlopen(req)
	html = lxml.html.parse(resp).getroot()
	#years = html.cssselect("div.info_body div.year")
	
	for year_code in ['2008','2004']:
		path = "div.year_"+year_code+" div.party_list"
		party_lists = html.cssselect(path)
		
		for party_list in party_lists:
			votes = party_list.cssselect("div.votes")[0].text
			party_name = party_list.cssselect("div.party_name")[0].text
			candidate_name = party_list.cssselect("div.cand_name")[0].text
			votes = votes.replace(',','')
			if candidate_name == 'Uncontested' or party_name is None:
				party_name = 'None';
			print parliment_code+','+year_code+','+candidate_name+','+party_name+','+votes